| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86 to 54.5k) | int64 (0 to 371) | string (lengths 87 to 49.2k) | int64 (0 to 349) | int64 (0 to 1) |
'''simple docstring'''
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
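# Usage sketch (assumes a standard `transformers` install): each name above is a
# lazily-loaded subpackage bundling one model's configuration, tokenization, and
# modeling code.
if __name__ == "__main__":
    from transformers.models import bert

    config = bert.BertConfig()
    print(config.hidden_size)  # 768 by default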
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}


class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
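# Usage sketch for the tokenizer above (assumes `transformers` and `sentencepiece`
# are installed and the Hub checkpoint is reachable):
if __name__ == "__main__":
    from transformers import ReformerTokenizer

    tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
    ids = tok("Crime and Punishment").input_ids
    print(tok.convert_ids_to_tokens(ids))
    print(tok.decode(ids))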
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal):
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
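# Example run of `dijkstra` on a small grid (1 marks a passable cell, 0 a wall):
if __name__ == "__main__":
    example_grid = np.array(
        [
            [1, 1, 1],
            [0, 0, 1],
            [1, 1, 1],
        ]
    )
    distance, path = dijkstra(example_grid, (0, 0), (2, 0), allow_diagonal=False)
    print(distance)  # 6.0: six unit steps around the wall
    print(path)  # [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)]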
__UpperCAmelCase = "\n# Transformers ์ค์น ๋ฐฉ๋ฒ\n! pip install transformers datasets\n# ๋ง์ง๋ง ๋ฆด๋ฆฌ์ค ๋์ ์์ค์์ ์ค์นํ๋ ค๋ฉด, ์ ๋ช
๋ น์ ์ฃผ์์ผ๋ก ๋ฐ๊พธ๊ณ ์๋ ๋ช
๋ น์ ํด์ ํ์ธ์.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__UpperCAmelCase = [{"type": "code", "content": INSTALL_CONTENT}]
__UpperCAmelCase = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def lowerCAmelCase_ ( ) ->Optional[int]:
lowerCamelCase__ : List[Any] ='https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
lowerCamelCase__ : Dict =Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ).convert('RGB' )
return image
def lowerCAmelCase_ ( snake_case_ : Any ) ->str:
lowerCamelCase__ : Tuple =[]
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias


def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()
    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
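# Usage sketch for a converted checkpoint (the local path is hypothetical and
# assumes the script was run with `--pytorch_dump_folder_path ./blip2-opt-2.7b`):
#
#     from transformers import Blip2ForConditionalGeneration, Blip2Processor
#
#     processor = Blip2Processor.from_pretrained("./blip2-opt-2.7b")  # hypothetical output dir
#     model = Blip2ForConditionalGeneration.from_pretrained("./blip2-opt-2.7b")
#     inputs = processor(images=load_demo_image(), return_tensors="pt")
#     out = model.generate(**inputs, max_new_tokens=30)
#     print(processor.batch_decode(out, skip_special_tokens=True)[0].strip())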
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
        "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
        "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
        "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
        "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
        "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
        "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
    },
}


class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
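# Usage sketch (assumes `transformers` with the fast `tokenizers` backend and Hub access):
if __name__ == "__main__":
    from transformers import BloomTokenizerFast

    tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
    enc = tok("Hello world")
    print(enc["input_ids"])
    print(tok.decode(enc["input_ids"]))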
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
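# Behavior sketch (assumes `transformers` with torch installed): the `_LazyModule`
# registered above defers importing the torch-heavy modeling code until an
# attribute is first accessed.
#
#     from transformers.models.upernet import UperNetConfig
#
#     config = UperNetConfig()
#     print(config.model_type)  # "upernet"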
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
    import jax.numpy as jnp

if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
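# Quick numpy-only illustration of the framework-agnostic helpers exercised above
# (assumes `transformers` and `numpy` are installed):
if __name__ == "__main__":
    demo = np.random.randn(1, 3, 4)
    print(transpose(demo).shape)  # (4, 3, 1): axes reversed, like demo.transpose()
    print(reshape(demo, (12,)).shape)  # (12,)
    print(squeeze(demo).shape)  # (3, 4): size-1 axes removed
    print(expand_dims(demo, axis=0).shape)  # (1, 1, 3, 4)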
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
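# Worked example: the system 4x + y = 1, 2x + 5y = 3 is strictly diagonally
# dominant, so Jacobi iteration converges to the exact solution (1/9, 5/9).
if __name__ == "__main__":
    coefficients = np.array([[4.0, 1.0], [2.0, 5.0]])
    constants = np.array([[1.0], [3.0]])
    solution = jacobi_iteration_method(coefficients, constants, init_val=[0, 0], iterations=25)
    print(solution)  # approximately [0.1111, 0.5556]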
"""simple docstring"""
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
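# Usage sketch (assumes `transformers`, `sentencepiece`, and Hub access):
if __name__ == "__main__":
    from transformers import BertGenerationTokenizer

    tok = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
    print(tok.tokenize("Sequence to sequence generation"))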
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
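# Quick check of the normalized cosine-similarity helper defined at the top of
# this file (rows are L2-normalized before the matmul):
if __name__ == "__main__":
    a = jnp.array([[1.0, 0.0], [0.0, 2.0]])
    b = jnp.array([[1.0, 0.0]])
    print(jax_cosine_distance(a, b))  # [[1.], [0.]]: per-row cosine similarity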
import unittest

from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin


if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor


class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = """<HTML>

    <HEAD>
    <TITLE>sample document</TITLE>
    </HEAD>

    <BODY BGCOLOR="FFFFFF">
    <HR>
    <a href="http://google.com">Goog</a>
    <H1>This is one header</H1>
    <H2>This is a another Header</H2>
    <P>Travel from
    <P>
    <B>SFO to JFK</B>
    <BR>
    <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
    <HR>
    <div style="color:#0000FF">
    <h3>Traveler <b> name </b> is
    <p> John Doe </p>
    </div>"""

    html_string_2 = """
    <!DOCTYPE html>
    <html>
    <body>

    <h1>My First Heading</h1>
    <p>My first paragraph.</p>

    </body>
    </html>
    """

    return [html_string_1, html_string_2]


@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
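# Minimal end-to-end run of the pipeline class under test (a sketch; assumes
# `diffusers` with torch plus network access, mirroring the integration test above):
if __name__ == "__main__":
    pipe = DDIMPipeline(unet=UNet2DModel.from_pretrained("google/ddpm-cifar10-32"), scheduler=DDIMScheduler())
    sample = pipe(generator=torch.manual_seed(0), num_inference_steps=50, output_type="pil").images[0]
    sample.save("ddim_cifar10.png")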
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # asdict() should keep the deprecated "dataset_name" field for backward compatibility
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
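# The round trip exercised above, in one shot (assumes `datasets` is installed):
if __name__ == "__main__":
    sd = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
    yaml_list = sd._to_yaml_list()
    print(yaml_list)
    print(SplitDict._from_yaml_list(yaml_list))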
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE : List[Any] = {
"configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
"tokenization_canine": ["CanineTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : str = [
"CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
"CanineForMultipleChoice",
"CanineForQuestionAnswering",
"CanineForSequenceClassification",
"CanineForTokenClassification",
"CanineLayer",
"CanineModel",
"CaninePreTrainedModel",
"load_tf_weights_in_canine",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
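# The same deferred-import idea in miniature, via PEP 562 module-level
# __getattr__ instead of transformers' _LazyModule (the attribute map below is
# illustrative):
#
#   import importlib
#
#   _LAZY_ATTRS = {"CanineModel": ".modeling_canine", "CanineTokenizer": ".tokenization_canine"}
#
#   def __getattr__(name):
#       if name in _LAZY_ATTRS:
#           return getattr(importlib.import_module(_LAZY_ATTRS[name], __name__), name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")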
| 21 |
"""simple docstring"""
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
A_ = logging.get_logger(__name__)
class lowercase:
'''simple docstring'''
lowercase__ = 42
lowercase__ = None
@staticmethod
def UpperCamelCase_ ( ):
'''simple docstring'''
raise NotImplementedError
def UpperCamelCase_ ( self: Tuple, a_: int, a_: int, a_: str, **a_: Dict ):
'''simple docstring'''
raise NotImplementedError
def UpperCamelCase_ ( self: Union[str, Any], a_: List[str] ):
'''simple docstring'''
raise NotImplementedError
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
if not self.is_available():
raise RuntimeError(
f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}." )
@classmethod
def UpperCamelCase_ ( cls: Tuple ):
'''simple docstring'''
return f"`pip install {cls.pip_package or cls.name}`"
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "optuna"
@staticmethod
def UpperCamelCase_ ( ):
'''simple docstring'''
return is_optuna_available()
def UpperCamelCase_ ( self: Union[str, Any], a_: List[Any], a_: int, a_: str, **a_: List[str] ):
'''simple docstring'''
return run_hp_search_optuna(a_, a_, a_, **a_ )
def UpperCamelCase_ ( self: Optional[Any], a_: Any ):
'''simple docstring'''
return default_hp_space_optuna(a_ )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "ray"
lowercase__ = "'ray[tune]'"
@staticmethod
def UpperCamelCase_ ( ):
'''simple docstring'''
return is_ray_available()
def UpperCamelCase_ ( self: int, a_: Optional[Any], a_: int, a_: str, **a_: List[Any] ):
'''simple docstring'''
return run_hp_search_ray(a_, a_, a_, **a_ )
def UpperCamelCase_ ( self: str, a_: Tuple ):
'''simple docstring'''
return default_hp_space_ray(a_ )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "sigopt"
@staticmethod
def UpperCamelCase_ ( ):
'''simple docstring'''
return is_sigopt_available()
def UpperCamelCase_ ( self: Dict, a_: str, a_: int, a_: str, **a_: int ):
'''simple docstring'''
return run_hp_search_sigopt(a_, a_, a_, **a_ )
def UpperCamelCase_ ( self: str, a_: List[str] ):
'''simple docstring'''
return default_hp_space_sigopt(a_ )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = "wandb"
@staticmethod
def UpperCamelCase_ ( ):
'''simple docstring'''
return is_wandb_available()
def UpperCamelCase_ ( self: Optional[Any], a_: str, a_: int, a_: str, **a_: Union[str, Any] ):
'''simple docstring'''
return run_hp_search_wandb(a_, a_, a_, **a_ )
def UpperCamelCase_ ( self: str, a_: Any ):
'''simple docstring'''
return default_hp_space_wandb(a_ )
A_ = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Optional[int] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(snake_case__ ) > 0:
_snake_case : Any = available_backends[0].name
if len(snake_case__ ) > 1:
logger.info(
F"{len(snake_case__ )} hyperparameter search backends available. Using {name} as the default." )
return name
raise RuntimeError(
"""No hyperparameter search backend available.\n"""
+ """\n""".join(
F" - To install {backend.name} run {backend.pip_install()}"
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
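# Dispatch sketch for the registry above (the upstream helper name
# `default_hp_search_backend` and the method name `ensure_available` are
# assumptions here, since the definitions above are name-obfuscated):
#
#   name = default_hp_search_backend()                        # e.g. "optuna"
#   backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(name)]()
#   backend.ensure_available()    # raises with a pip install hint if missing
#   best_run = backend.run(trainer, n_trials=10, direction="minimize")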
| 64 | 0 |
"""simple docstring"""
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path( pred_path , tgt_path , save_path=None , **kwargs ):
    """simple docstring"""
    output_lns = [x.strip() for x in open(pred_path ).readlines()]
    reference_lns = [x.strip() for x in open(tgt_path ).readlines()][: len(output_lns )]
    metrics = calculate_rouge(output_lns , reference_lns , **kwargs )
    if save_path is not None:
        save_json(metrics , save_path , indent=None )
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
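# Example CLI invocation via fire (the script name and paths are placeholders):
#   python rouge_cli.py preds.txt refs.txt --save_path rouge_metrics.json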
| 303 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
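# Deprecation-shim pattern: the subclass only emits a warning at construction
# time and inherits all behaviour unchanged from YolosImageProcessor.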
| 303 | 1 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
snake_case_ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class __snake_case ( a ):
def __init__( self : Any , _snake_case : WhisperForConditionalGeneration , _snake_case : WhisperProcessor , _snake_case : AutoencoderKL , _snake_case : CLIPTextModel , _snake_case : CLIPTokenizer , _snake_case : UNetaDConditionModel , _snake_case : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , _snake_case : StableDiffusionSafetyChecker , _snake_case : CLIPImageProcessor , ):
"""simple docstring"""
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''')
self.register_modules(
speech_model=_snake_case , speech_processor=_snake_case , vae=_snake_case , text_encoder=_snake_case , tokenizer=_snake_case , unet=_snake_case , scheduler=_snake_case , feature_extractor=_snake_case , )
def lowerCamelCase ( self : Optional[int] , _snake_case : Optional[Union[str, int]] = "auto"):
"""simple docstring"""
if slice_size == "auto":
UpperCAmelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_snake_case)
def lowerCamelCase ( self : Any):
"""simple docstring"""
self.enable_attention_slicing(_snake_case)
@torch.no_grad()
def __call__( self : Optional[Any] , _snake_case : str , _snake_case : Optional[Any]=16000 , _snake_case : int = 512 , _snake_case : int = 512 , _snake_case : int = 50 , _snake_case : float = 7.5 , _snake_case : Optional[Union[str, List[str]]] = None , _snake_case : Optional[int] = 1 , _snake_case : float = 0.0 , _snake_case : Optional[torch.Generator] = None , _snake_case : Optional[torch.FloatTensor] = None , _snake_case : Optional[str] = "pil" , _snake_case : bool = True , _snake_case : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _snake_case : int = 1 , **_snake_case : str , ):
"""simple docstring"""
UpperCAmelCase_ = self.speech_processor.feature_extractor(
_snake_case , return_tensors='''pt''' , sampling_rate=_snake_case).input_features.to(self.device)
UpperCAmelCase_ = self.speech_model.generate(_snake_case , max_length=480000)
UpperCAmelCase_ = self.speech_processor.tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case , normalize=_snake_case)[
0
]
if isinstance(_snake_case , _snake_case):
UpperCAmelCase_ = 1
elif isinstance(_snake_case , _snake_case):
UpperCAmelCase_ = len(_snake_case)
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(_snake_case)}""")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_snake_case , _snake_case) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(_snake_case)}.""")
# get prompt text embeddings
UpperCAmelCase_ = self.tokenizer(
_snake_case , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
UpperCAmelCase_ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCAmelCase_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""")
UpperCAmelCase_ = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCAmelCase_ = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = text_embeddings.shape
UpperCAmelCase_ = text_embeddings.repeat(1 , _snake_case , 1)
UpperCAmelCase_ = text_embeddings.view(bs_embed * num_images_per_prompt , _snake_case , -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCAmelCase_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCAmelCase_ = 42
if negative_prompt is None:
UpperCAmelCase_ = [''''''] * batch_size
elif type(_snake_case) is not type(_snake_case):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(_snake_case)} !="""
F""" {type(_snake_case)}.""")
elif isinstance(_snake_case , _snake_case):
UpperCAmelCase_ = [negative_prompt]
elif batch_size != len(_snake_case):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(_snake_case)}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
''' the batch size of `prompt`.''')
else:
UpperCAmelCase_ = negative_prompt
UpperCAmelCase_ = text_input_ids.shape[-1]
UpperCAmelCase_ = self.tokenizer(
_snake_case , padding='''max_length''' , max_length=_snake_case , truncation=_snake_case , return_tensors='''pt''' , )
UpperCAmelCase_ = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase_ = uncond_embeddings.shape[1]
UpperCAmelCase_ = uncond_embeddings.repeat(1 , _snake_case , 1)
UpperCAmelCase_ = uncond_embeddings.view(batch_size * num_images_per_prompt , _snake_case , -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase_ = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCAmelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCAmelCase_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCAmelCase_ = torch.randn(_snake_case , generator=_snake_case , device='''cpu''' , dtype=_snake_case).to(
self.device)
else:
UpperCAmelCase_ = torch.randn(_snake_case , generator=_snake_case , device=self.device , dtype=_snake_case)
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""")
UpperCAmelCase_ = latents.to(self.device)
# set timesteps
self.scheduler.set_timesteps(_snake_case)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCAmelCase_ = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (ฮท) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to ฮท in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCAmelCase_ = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys())
UpperCAmelCase_ = {}
if accepts_eta:
UpperCAmelCase_ = eta
for i, t in enumerate(self.progress_bar(_snake_case)):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
UpperCAmelCase_ = self.scheduler.scale_model_input(_snake_case , _snake_case)
# predict the noise residual
UpperCAmelCase_ = self.unet(_snake_case , _snake_case , encoder_hidden_states=_snake_case).sample
# perform guidance
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ = noise_pred.chunk(2)
UpperCAmelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ = self.scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_snake_case , _snake_case , _snake_case)
UpperCAmelCase_ = 1 / 0.1_8_2_1_5 * latents
UpperCAmelCase_ = self.vae.decode(_snake_case).sample
UpperCAmelCase_ = (image / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
UpperCAmelCase_ = self.numpy_to_pil(_snake_case)
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=_snake_case , nsfw_content_detected=_snake_case)
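# Classifier-free guidance in miniature: the same combination rule applied in
# the denoising loop above, demonstrated on stand-in tensors.
import torch

eps_uncond = torch.zeros(1, 4, 8, 8)   # stand-in for the unconditional noise prediction
eps_text = torch.ones(1, 4, 8, 8)      # stand-in for the text-conditioned prediction
guidance_scale = 7.5
eps = eps_uncond + guidance_scale * (eps_text - eps_uncond)
assert torch.allclose(eps, torch.full((1, 4, 8, 8), 7.5))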
| 51 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
SCREAMING_SNAKE_CASE__ = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
SCREAMING_SNAKE_CASE__ = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
SCREAMING_SNAKE_CASE__ = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def A__ ( self ) -> Tuple:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
UpperCamelCase = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
UpperCamelCase = evaluate(dataset=_SCREAMING_SNAKE_CASE , predictions=_SCREAMING_SNAKE_CASE )
return score
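# Note: the scoring method above reshapes the flat `references` list into the
# nested {"paragraphs": [{"qas": [...]}]} layout expected by the official CUAD
# evaluate() script before delegating the actual scoring to it.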
| 321 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()

def floats_list( shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
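# e.g. floats_list((2, 3), rng=random.Random(0)) returns a 2x3 nested list of
# floats drawn uniformly from [0, scale).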
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
def __init__( self : str , _a : Any , _a : Union[str, Any]=7 , _a : Any=4_0_0 , _a : Optional[int]=2_0_0_0 , _a : Tuple=1 , _a : str=0.0 , _a : Optional[Any]=1_6_0_0_0 , _a : List[str]=True , _a : Dict=8_0 , _a : List[str]=1_6 , _a : Union[str, Any]=6_4 , _a : Any="hann_window" , _a : Dict=8_0 , _a : int=7_6_0_0 , _a : List[str]=1e-10 , _a : int=True , ):
a__: Tuple =parent
a__: Optional[int] =batch_size
a__: Dict =min_seq_length
a__: Any =max_seq_length
a__: Dict =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
a__: str =feature_size
a__: List[Any] =padding_value
a__: Union[str, Any] =sampling_rate
a__: List[str] =do_normalize
a__: Any =num_mel_bins
a__: Dict =hop_length
a__: Optional[int] =win_length
a__: List[Any] =win_function
a__: int =fmin
a__: List[str] =fmax
a__: List[str] =mel_floor
a__: str =return_attention_mask
def _lowerCamelCase ( self : Union[str, Any] ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def _lowerCamelCase ( self : str , _a : List[str]=False , _a : Optional[Any]=False ):
def _flatten(_a : List[str] ):
return list(itertools.chain(*_a ) )
if equal_length:
a__: int =floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
a__: Any =[
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
a__: Optional[int] =[np.asarray(_a ) for x in speech_inputs]
return speech_inputs
def _lowerCamelCase ( self : int , _a : Union[str, Any]=False , _a : str=False ):
if equal_length:
a__: Union[str, Any] =[floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
a__: Optional[Any] =[
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
a__: Optional[int] =[np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
class lowerCamelCase__ ( _a , unittest.TestCase ):
_lowerCAmelCase = SpeechTaFeatureExtractor
def _lowerCamelCase ( self : Optional[Any] ):
a__: List[str] =SpeechTaFeatureExtractionTester(self )
def _lowerCamelCase ( self : Optional[int] , _a : Union[str, Any] ):
self.assertTrue(np.all(np.mean(_a , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_a , axis=0 ) - 1 ) < 1e-3 ) )
def _lowerCamelCase ( self : Union[str, Any] ):
# Tests that all call wrap to encode_plus and batch_encode_plus
a__: Union[str, Any] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a__: Union[str, Any] =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
a__: Union[str, Any] =[np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
a__: Union[str, Any] =feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
a__: List[Any] =feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test batched
a__: Tuple =feat_extract(_a , return_tensors="np" ).input_values
a__: int =feat_extract(_a , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
def _lowerCamelCase ( self : int ):
a__: Optional[int] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__: Union[str, Any] =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
a__: Tuple =["longest", "max_length", "do_not_pad"]
a__: List[str] =[None, 1_6_0_0, None]
for max_length, padding in zip(_a , _a ):
a__: Optional[int] =feat_extract(_a , padding=_a , max_length=_a , return_tensors="np" )
a__: Optional[int] =processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def _lowerCamelCase ( self : Any ):
a__: List[Any] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__: Tuple =range(8_0_0 , 1_4_0_0 , 2_0_0 )
a__: List[Any] =[floats_list((1, x) )[0] for x in lengths]
a__: Optional[int] =["longest", "max_length", "do_not_pad"]
a__: Optional[Any] =[None, 1_6_0_0, None]
for max_length, padding in zip(_a , _a ):
a__: List[Any] =feat_extract(_a , max_length=_a , padding=_a )
a__: Optional[Any] =processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def _lowerCamelCase ( self : str ):
a__: List[str] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__: str =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
a__: int =feat_extract(
_a , truncation=_a , max_length=1_0_0_0 , padding="max_length" , return_tensors="np" )
a__: Any =processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def _lowerCamelCase ( self : Optional[int] ):
a__: Optional[Any] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__: Dict =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
a__: int =feat_extract(
_a , truncation=_a , max_length=1_0_0_0 , padding="longest" , return_tensors="np" )
a__: int =processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
a__: int =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
a__: Tuple =feat_extract(
_a , truncation=_a , max_length=2_0_0_0 , padding="longest" , return_tensors="np" )
a__: Optional[int] =processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
def _lowerCamelCase ( self : Any ):
a__: List[Any] =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__: Dict =np.random.rand(1_0_0 ).astype(np.floataa )
a__: str =np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
a__: int =feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
a__: int =feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def _lowerCamelCase ( self : Any ):
# Tests that all call wrap to encode_plus and batch_encode_plus
a__: int =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a__: Optional[int] =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
a__: Any =[np.asarray(_a ) for speech_input in speech_inputs]
# Test feature size
a__: Dict =feature_extractor(audio_target=_a , padding=_a , return_tensors="np" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
a__: Tuple =feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
a__: str =feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test batched
a__: List[Any] =feature_extractor(_a , return_tensors="np" ).input_values
a__: Union[str, Any] =feature_extractor(_a , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
a__: Optional[int] =[floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
a__: str =np.asarray(_a )
a__: int =feature_extractor(_a , return_tensors="np" ).input_values
a__: int =feature_extractor(_a , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
def _lowerCamelCase ( self : str ):
a__: str =self.feat_extract_tester.prepare_inputs_for_target()
a__: Dict =self.feature_extraction_class(**self.feat_extract_dict )
a__: int =feat_extract.model_input_names[0]
a__: Optional[Any] =BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_a ) == len(_a ) for x, y in zip(_a , processed_features[input_name] ) ) )
a__: Any =self.feat_extract_tester.prepare_inputs_for_target(equal_length=_a )
a__: List[str] =BatchFeature({input_name: speech_inputs} , tensor_type="np" )
a__: List[Any] =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
a__: Tuple =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _lowerCamelCase ( self : Optional[Any] ):
a__: Tuple =self.feat_extract_tester.prepare_inputs_for_target(equal_length=_a )
a__: int =self.feature_extraction_class(**self.feat_extract_dict )
a__: Tuple =feat_extract.model_input_names[0]
a__: Optional[int] =BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
a__: Optional[int] =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
a__: Optional[Any] =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _lowerCamelCase ( self : str ):
a__: List[Any] =self.feature_extraction_class(**self.feat_extract_dict )
a__: Tuple =self.feat_extract_tester.prepare_inputs_for_target()
a__: Optional[int] =feat_extract.model_input_names[0]
a__: Optional[int] =BatchFeature({input_name: speech_inputs} )
a__: Tuple =feat_extract.num_mel_bins # hack!
a__: int =feat_extract.pad(_a , padding="longest" , return_tensors="np" )[input_name]
a__: Dict =feat_extract.pad(_a , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def _lowerCamelCase ( self : List[str] ):
a__: Dict =self.feat_extract_dict
a__: Optional[Any] =True
a__: Any =self.feature_extraction_class(**_a )
a__: List[str] =self.feat_extract_tester.prepare_inputs_for_target()
a__: Optional[Any] =[len(_a ) for x in speech_inputs]
a__: Any =feat_extract.model_input_names[0]
a__: Union[str, Any] =BatchFeature({input_name: speech_inputs} )
a__: List[Any] =feat_extract.num_mel_bins # hack!
a__: Any =feat_extract.pad(_a , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , _a )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _a )
def _lowerCamelCase ( self : Any ):
a__: Tuple =self.feat_extract_dict
a__: Dict =True
a__: Tuple =self.feature_extraction_class(**_a )
a__: str =self.feat_extract_tester.prepare_inputs_for_target()
a__: List[str] =[len(_a ) for x in speech_inputs]
a__: Optional[int] =feat_extract.model_input_names[0]
a__: Optional[Any] =BatchFeature({input_name: speech_inputs} )
a__: List[str] =min(_a )
a__: Optional[int] =feat_extract.num_mel_bins # hack!
a__: Optional[Any] =feat_extract.pad(
_a , padding="max_length" , max_length=_a , truncation=_a , return_tensors="np" )
self.assertIn("attention_mask" , _a )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def _lowerCamelCase ( self : List[str] , _a : List[str] ):
from datasets import load_dataset
a__: List[Any] =load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
a__: Union[str, Any] =ds.sort("id" ).select(range(_a ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def _lowerCamelCase ( self : Tuple ):
# fmt: off
a__: Optional[Any] =torch.tensor(
[2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03] )
# fmt: on
a__: Tuple =self._load_datasamples(1 )
a__: Tuple =SpeechTaFeatureExtractor()
a__: Any =feature_extractor(_a , return_tensors="pt" ).input_values
        self.assertEqual(input_values.shape , (1, 9_3_6_8_0) )
self.assertTrue(torch.allclose(input_values[0, :3_0] , _a , atol=1e-6 ) )
def _lowerCamelCase ( self : int ):
# fmt: off
a__: List[Any] =torch.tensor(
[-2.6_8_7_0, -3.0_1_0_4, -3.1_3_5_6, -3.5_3_5_2, -3.0_0_4_4, -3.0_3_5_3, -3.4_7_1_9, -3.6_7_7_7,
-3.1_5_2_0, -2.9_4_3_5, -2.6_5_5_3, -2.8_7_9_5, -2.9_9_4_4, -2.5_9_2_1, -3.0_2_7_9, -3.0_3_8_6,
-3.0_8_6_4, -3.1_2_9_1, -3.2_3_5_3, -2.7_4_4_4, -2.6_8_3_1, -2.7_2_8_7, -3.1_7_6_1, -3.1_5_7_1,
-3.2_7_2_6, -3.0_5_8_2, -3.1_0_0_7, -3.4_5_3_3, -3.4_6_9_5, -3.0_9_9_8] )
# fmt: on
a__: List[Any] =self._load_datasamples(1 )
a__: List[str] =SpeechTaFeatureExtractor()
a__: Any =feature_extractor(audio_target=_a , return_tensors="pt" ).input_values
        self.assertEqual(input_values.shape , (1, 3_6_6, 8_0) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , _a , atol=1e-4 ) )
| 42 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class lowerCamelCase__ ( _a ):
@require_torch
def _lowerCamelCase ( self : Union[str, Any] ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
a__: Dict ="\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
a__: Union[str, Any] ="\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
a__: Dict ="\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
a__: Tuple ="hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(_a )
BertModel.from_pretrained(_a )
BertTokenizer.from_pretrained(_a )
pipeline(task="fill-mask" , model=_a )
# baseline - just load from_pretrained with normal network
a__: Optional[int] =[sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
a__: str =self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
a__: Union[str, Any] ="1"
a__: Any =subprocess.run(_a , env=_a , check=_a , capture_output=_a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def _lowerCamelCase ( self : str ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
a__: Optional[int] ="\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
a__: Tuple ="\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
a__: List[Any] ="\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
a__: str ="hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(_a )
BertModel.from_pretrained(_a )
BertTokenizer.from_pretrained(_a )
pipeline(task="fill-mask" , model=_a )
# baseline - just load from_pretrained with normal network
a__: Any =[sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
a__: Optional[Any] =self.get_env()
a__: Union[str, Any] =subprocess.run(_a , env=_a , check=_a , capture_output=_a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def _lowerCamelCase ( self : List[str] ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
a__: Tuple ="\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
a__: str ="\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
a__: int ="\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
# baseline - just load from_pretrained with normal network
a__: Union[str, Any] =[sys.executable, "-c", "\n".join([load, run] )]
# should succeed
a__: Optional[Any] =self.get_env()
a__: Optional[Any] =subprocess.run(_a , env=_a , check=_a , capture_output=_a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# next emulate no network
a__: int =[sys.executable, "-c", "\n".join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
a__: Tuple ="1"
a__: Dict =subprocess.run(_a , env=_a , check=_a , capture_output=_a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def _lowerCamelCase ( self : Optional[Any] ):
a__: Dict ="\nfrom transformers import pipeline\n "
a__: Union[str, Any] ="\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
a__: List[str] ="\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
a__: Dict =self.get_env()
a__: Optional[Any] ="1"
a__: Dict =[sys.executable, "-c", "\n".join([load, mock, run] )]
a__: Optional[int] =subprocess.run(_a , env=_a , check=_a , capture_output=_a )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
"You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , )
@require_torch
def _lowerCamelCase ( self : Optional[int] ):
a__: Optional[Any] ="\nfrom transformers import AutoModel\n "
a__: str ="\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
# baseline - just load from_pretrained with normal network
a__: Tuple =[sys.executable, "-c", "\n".join([load, run] )]
# should succeed
a__: Any =self.get_env()
a__: int =subprocess.run(_a , env=_a , check=_a , capture_output=_a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
a__: List[Any] ="1"
a__: int =subprocess.run(_a , env=_a , check=_a , capture_output=_a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
| 42 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase_ : List[str] = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='trocr'
__a =['past_key_values']
__a ={
'num_attention_heads': 'decoder_attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'decoder_layers',
}
def __init__( self : Optional[int] , __a : Any=5_02_65 , __a : Optional[int]=10_24 , __a : List[Any]=12 , __a : str=16 , __a : int=40_96 , __a : Optional[Any]="gelu" , __a : Union[str, Any]=5_12 , __a : Dict=0.1 , __a : List[str]=0.0 , __a : Union[str, Any]=0.0 , __a : Any=2 , __a : Union[str, Any]=0.02 , __a : Any=0.0 , __a : List[str]=True , __a : Optional[Any]=False , __a : Union[str, Any]=True , __a : Optional[Any]=True , __a : Any=1 , __a : List[Any]=0 , __a : Any=2 , **__a : Optional[Any] , ):
_a = vocab_size
_a = d_model
_a = decoder_layers
_a = decoder_attention_heads
_a = decoder_ffn_dim
_a = activation_function
_a = max_position_embeddings
_a = dropout
_a = attention_dropout
_a = activation_dropout
_a = init_std
_a = decoder_layerdrop
_a = use_cache
_a = scale_embedding
_a = use_learned_position_embeddings
_a = layernorm_embedding
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , **__a , )
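# Usage sketch (upstream this class is TrOCRConfig; the values are illustrative):
#
#   config = TrOCRConfig(vocab_size=1000, d_model=256, decoder_layers=2,
#                        decoder_attention_heads=4)
#   assert config.hidden_size == config.d_model   # resolved through attribute_map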
| 63 |
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : Dict ) -> str:
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'
def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : int , lowercase : Tuple , lowercase : Optional[int] , lowercase : int=True ) -> Any:
model.train()
_a = model(lowercase )
_a = F.mse_loss(lowercase , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(lowercase )
def _lowerCamelCase ( lowercase : int , lowercase : Tuple=False ) -> List[str]:
set_seed(42 )
_a = RegressionModel()
_a = deepcopy(lowercase )
_a = RegressionDataset(length=80 )
_a = DataLoader(lowercase , batch_size=16 )
model.to(accelerator.device )
if sched:
_a = AdamW(params=model.parameters() , lr=1E-3 )
_a = AdamW(params=ddp_model.parameters() , lr=1E-3 )
        _a = LambdaLR(lowercase , lr_lambda=lambda epoch : epoch**0.65 )
        _a = LambdaLR(lowercase , lr_lambda=lambda epoch : epoch**0.65 )
# Make a copy of `model`
if sched:
_a , _a , _a , _a = accelerator.prepare(lowercase , lowercase , lowercase , lowercase )
else:
_a , _a = accelerator.prepare(lowercase , lowercase )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def _lowerCamelCase ( lowercase : Optional[Any] ) -> Optional[int]:
# Test when on a single CPU or GPU that the context manager does nothing
_a , _a , _a = get_training_setup(lowercase )
# Use a single batch
_a , _a = next(iter(lowercase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
_a , _a = accelerator.gather((ddp_input, ddp_target) )
_a , _a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase , lowercase , lowercase , lowercase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowercase ):
step_model(lowercase , lowercase , lowercase , lowercase )
else:
# Sync grads
step_model(lowercase , lowercase , lowercase , lowercase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(lowercase , lowercase , lowercase , lowercase )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
_a = ddp_input[torch.randperm(len(lowercase ) )]
def _lowerCamelCase ( lowercase : Tuple ) -> Tuple:
# Test on distributed setup that context manager behaves properly
_a , _a , _a = get_training_setup(lowercase )
# Use a single batch
_a , _a = next(iter(lowercase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
_a , _a = accelerator.gather((ddp_input, ddp_target) )
_a , _a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase , lowercase , lowercase , lowercase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowercase ):
step_model(lowercase , lowercase , lowercase , lowercase )
else:
# Sync grads
step_model(lowercase , lowercase , lowercase , lowercase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
_a = ddp_input[torch.randperm(len(lowercase ) )]
def _lowerCamelCase ( lowercase : List[Any]=False , lowercase : Optional[int]=False ) -> Any:
_a = Accelerator(
split_batches=lowercase , dispatch_batches=lowercase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
_a , _a , _a = get_training_setup(lowercase )
for iteration, batch in enumerate(lowercase ):
_a , _a = batch.values()
# Gather the distributed inputs and targs for the base model
_a , _a = accelerator.gather((ddp_input, ddp_target) )
_a , _a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase , lowercase , lowercase , lowercase , lowercase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(lowercase ):
step_model(lowercase , lowercase , lowercase , lowercase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(lowercase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
_a = ddp_input[torch.randperm(len(lowercase ) )]
GradientState._reset_state()
def _lowerCamelCase ( lowercase : int=False , lowercase : int=False ) -> Dict:
_a = Accelerator(
split_batches=lowercase , dispatch_batches=lowercase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
_a , _a , _a , _a , _a , _a , _a = get_training_setup(lowercase , lowercase )
for iteration, batch in enumerate(lowercase ):
_a , _a = batch.values()
# Gather the distributed inputs and targs for the base model
_a , _a = accelerator.gather((ddp_input, ddp_target) )
_a , _a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(lowercase , lowercase , lowercase , lowercase , lowercase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowercase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(lowercase ):
step_model(lowercase , lowercase , lowercase , lowercase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
_a = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowercase ))
if accelerator.num_processes > 1:
check_model_parameters(lowercase , lowercase , lowercase , lowercase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def _lowerCamelCase ( ) -> Any:
_a = Accelerator()
_a = RegressionDataset(length=80 )
_a = DataLoader(lowercase , batch_size=16 )
_a = RegressionDataset(length=96 )
_a = DataLoader(lowercase , batch_size=16 )
_a , _a = accelerator.prepare(lowercase , lowercase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(lowercase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase )
if iteration < len(lowercase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(lowercase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase )
if batch_num < len(lowercase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def _lowerCamelCase ( ) -> Optional[Any]:
_a = Accelerator()
_a = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(lowercase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(lowercase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation(lowercase , lowercase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation_with_opt_and_scheduler(lowercase , lowercase )
def _lowerCamelCase ( lowercase : Any ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
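# The accumulate() contract exercised above, in miniature (compute_loss is a
# placeholder, not part of the test harness):
#
#   accelerator = Accelerator(gradient_accumulation_steps=2)
#   model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#   for batch in dataloader:
#       with accelerator.accumulate(model):
#           loss = compute_loss(model, batch)
#           accelerator.backward(loss)      # grads sync only on the flushing step
#           optimizer.step()
#           optimizer.zero_grad()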
| 63 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a =logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, do_convert_rgb=True, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, do_convert_rgb=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
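# Usage sketch (illustrative; follows the standard image-processor call
# convention — the fake image and expected shape below are assumptions):
def _clip_processor_demo():
    import numpy as np

    processor = CLIPImageProcessor()
    image = np.zeros((256, 320, 3), dtype=np.uint8)  # fake H x W x C image
    batch = processor.preprocess(image, return_tensors="np")
    # shortest edge resized to 224, then center-cropped to 224 x 224
    assert batch["pixel_values"].shape == (1, 3, 224, 224)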
| 360 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
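# Usage sketch (illustrative, smaller-than-default values):
def _vit_msn_config_demo():
    config = ViTMSNConfig(hidden_size=384, num_hidden_layers=6, num_attention_heads=6)
    # patch grid for the default image/patch sizes: (224 / 16) ** 2 == 196 patches
    assert (config.image_size // config.patch_size) ** 2 == 196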
| 113 | 0 |
'''simple docstring'''
ERROR_MSG = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MSG)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG) from ex

    if letter.isdigit():
        raise ValueError(ERROR_MSG)

    return letter == LOOKUP_LETTERS[number % 23]
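# Worked example (the id itself is made up): 12_345_678 % 23 == 14 and
# LOOKUP_LETTERS[14] == "Z", so "12345678-Z" passes the checksum.
def _dni_demo():
    assert is_spain_national_id("12345678-Z")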
if __name__ == "__main__":
import doctest
doctest.testmod()
| 341 |
'''simple docstring'''
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    func_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0

    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(func_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
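# Input format sketch (inferred from main() above; the concrete path and slice
# are invented): each --correct_filename line holds four ";"-separated fields.
EXAMPLE_CORRECT_LINE = (
    "tests/models/bert/test_modeling_bert.py;"
    "BertModelIntegrationTest;"
    "test_inference_no_head;"
    "expected_slice = torch.tensor([[0.1, 0.2, 0.3]])"
)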
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('--correct_filename', help='filename of tests with expected result')
parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
__lowerCAmelCase = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 341 | 1 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file=None, *args):
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = jax_version = jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 369 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
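# Example invocation (all paths are placeholders):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /tmp/rembert/model.ckpt \
#       --rembert_config_file /tmp/rembert/config.json \
#       --pytorch_dump_path /tmp/rembert/pytorch_model.bin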
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCAmelCase_ = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 260 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
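        # Worked example of the padding arithmetic above (illustrative numbers):
        # with seq_length=7 and attention_window=4, 7 + (4 - 7 % 4) % 4 == 8,
        # i.e. the encoder length is rounded up to the next window multiple.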
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
def A__ ( self ) -> int:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = tf.zeros_like(inputs_dict["""attention_mask"""] )
__lowerCAmelCase = 2
__lowerCAmelCase = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["""global_attention_mask"""] , )
__lowerCAmelCase = True
__lowerCAmelCase = self.model_tester.seq_length
__lowerCAmelCase = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(snake_case_ ):
__lowerCAmelCase = outputs.decoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(snake_case_ ):
__lowerCAmelCase = [t.numpy() for t in outputs.encoder_attentions]
__lowerCAmelCase = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = model_class(snake_case_ )
__lowerCAmelCase = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
__lowerCAmelCase = len(snake_case_ )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
if self.is_encoder_decoder:
__lowerCAmelCase = model_class(snake_case_ )
__lowerCAmelCase = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_decoder_attentions_output(snake_case_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowerCAmelCase = True
__lowerCAmelCase = model_class(snake_case_ )
__lowerCAmelCase = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
# Check attention is always last and order is fine
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = model_class(snake_case_ )
__lowerCAmelCase = model(self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(snake_case_ ) )
self.assertEqual(model.config.output_hidden_states , snake_case_ )
check_encoder_attentions_output(snake_case_ )
@unittest.skip("""LED keeps using potentially symbolic tensors in conditionals and breaks tracing.""" )
def A__ ( self ) -> int:
pass
def A__ ( self ) -> Tuple:
# TODO: Head-masking not yet implement
pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 301 |
"""simple docstring"""
from math import pi, sqrt
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
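def _gamma_demo() -> None:
    # Worked example of the recursion above: gamma(3.5) unrolls to
    # 2.5 * gamma(2.5) = 2.5 * 1.5 * gamma(1.5) = 2.5 * 1.5 * 0.5 * sqrt(pi).
    from math import isclose

    assert isclose(gamma(3.5), 2.5 * 1.5 * 0.5 * sqrt(pi))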
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
print(F"gamma({num}) = {gamma(num)}")
print('''\nEnter 0 to exit...''')
| 301 | 1 |
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(workflow_run_id, token=None):
    """Get all GitHub Action artifacts from a workflow run"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Action artifact from a URL."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The first request only returns a redirect; `Location` holds the real download URL.
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)"""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files"""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]

    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors


def reduce_by_error(logs, error_filter=None):
    """count each error"""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Get the model name from a test method"""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test
def reduce_by_model(logs, error_filter=None):
    """count each error per model"""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
a_ = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
| 163 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=246_534, n_positions=256, n_embd=1280, dff=8192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02, use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
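# Usage sketch (illustrative hyper-parameters, much smaller than the defaults):
def _ctrl_config_demo():
    config = CTRLConfig(n_layer=6, n_head=8, n_embd=512, dff=2048)
    # `attribute_map` lets the generic names resolve to the CTRL-specific ones.
    assert config.hidden_size == config.n_embd == 512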
| 163 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 250 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, do_convert_rgb=True, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, do_convert_rgb=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 250 | 1 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    locka = FileLock(str(tmpdir / "foo.lock"))
    lockb = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lockb.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    locka = FileLock(str(tmpdir / filename))
    assert locka._lock_file.endswith(".lock")
    assert not locka._lock_file.endswith(filename)
    assert len(os.path.basename(locka._lock_file)) <= 255
    lockb = FileLock(tmpdir / filename)
    with locka.acquire():
        with pytest.raises(Timeout):
            lockb.acquire(0)
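# Typical non-test usage sketch (path is a placeholder; the vendored FileLock
# mirrors the `py-filelock` context-manager API exercised above):
def _filelock_demo():
    with FileLock("/tmp/demo.lock"):
        pass  # critical section: only one process at a time gets here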
| 201 |
def move_tower(height, from_pole, to_pole, with_pole):
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    print("moving disk from", fp, "to", tp)


def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")
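def count_moves(height: int) -> int:
    # Closed form for the recursion above: moving n disks costs
    # 2 * (moves for n - 1) + 1 = 2**n - 1, e.g. height 3 -> 7 moves.
    return 2**height - 1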
if __name__ == "__main__":
main()
| 201 | 1 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset; multiple copies of the same prompt are yielded sequentially."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated sequences contain any of the end-of-function strings."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of the code containing EOF_STRINGS"""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
__snake_case : Any = HfArgumentParser(UpperCamelCase_ )
__snake_case : Any = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
__snake_case : Tuple = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
__snake_case : Optional[int] = "false"
if args.num_workers is None:
__snake_case : Optional[int] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
__snake_case : str = Accelerator()
set_seed(args.seed , device_specific=UpperCamelCase_ )
# Load model and tokenizer
__snake_case : str = AutoTokenizer.from_pretrained(args.model_ckpt )
__snake_case : Optional[Any] = tokenizer.eos_token
__snake_case : Union[str, Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
__snake_case : Dict = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , UpperCamelCase_ , UpperCamelCase_ )] ),
}
# Load evaluation dataset and metric
__snake_case : Tuple = load_dataset("openai_humaneval" )
__snake_case : List[str] = load_metric("code_eval" )
__snake_case : List[str] = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
__snake_case : int = args.n_samples // args.batch_size
__snake_case : Optional[int] = TokenizedDataset(UpperCamelCase_ , human_eval["test"] , n_copies=UpperCamelCase_ , n_tasks=UpperCamelCase_ )
# do not confuse args.batch_size, which is actually the num_return_sequences
__snake_case : Any = DataLoader(UpperCamelCase_ , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
__snake_case : Dict = code_eval_metric.compute(references=[""] , predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
__snake_case , __snake_case : Tuple = accelerator.prepare(UpperCamelCase_ , UpperCamelCase_ )
__snake_case : Dict = complete_code(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , n_tasks=UpperCamelCase_ , batch_size=args.batch_size , **UpperCamelCase_ , )
if accelerator.is_main_process:
__snake_case : List[Any] = []
for task in tqdm(range(UpperCamelCase_ ) ):
__snake_case : Tuple = human_eval["test"][task]["test"]
__snake_case : Optional[Any] = f"""check({human_eval["test"][task]["entry_point"]})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
__snake_case , __snake_case : int = code_eval_metric.compute(
references=UpperCamelCase_ , predictions=UpperCamelCase_ , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , "w" ) as fp:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 326 |
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers"""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys( d ):
    '''simple docstring'''
    # (1) strip the "@@" word-continuation marker, (2) add the "</w>" word-ending marker elsewhere
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[F"""{k}</w>"""]
        d2[k] = d[k]  # restore
    return d2
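# Illustrative usage sketch (added for clarity, not part of the original script):
# demonstrates the key rewriting above on a toy fairseq BPE vocabulary. The token
# ids are invented for the example; only the "@@" / "</w>" handling matters.
def _demo_rewrite_dict_keys():  # hypothetical helper, for illustration only
    toy_vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hel@@": 4, "lo": 5}
    out = rewrite_dict_keys(toy_vocab)
    # "hel@@" loses its continuation marker, word-final "lo" gains "</w>",
    # and the four special tokens keep their original spelling
    assert out == {"hel": 4, "lo</w>": 5, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}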
def convert_biogpt_checkpoint_to_pytorch( biogpt_checkpoint_path, pytorch_dump_folder_path ):
'''simple docstring'''
if not os.path.exists(UpperCamelCase_):
raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""")
os.makedirs(UpperCamelCase_, exist_ok=UpperCamelCase_)
print(F"""Writing results to {pytorch_dump_folder_path}""")
# handle various types of models
__lowercase = os.path.join(UpperCamelCase_, "checkpoint.pt")
if not os.path.isfile(UpperCamelCase_):
raise ValueError(F"""path to the file {checkpoint_file} does not exist!""")
__lowercase = torch.load(UpperCamelCase_, map_location="cpu")
__lowercase = chkpt["cfg"]["model"]
# dicts
__lowercase = os.path.join(UpperCamelCase_, "dict.txt")
if not os.path.isfile(UpperCamelCase_):
raise ValueError(F"""path to the file {dict_file} does not exist!""")
__lowercase = Dictionary.load(UpperCamelCase_)
__lowercase = rewrite_dict_keys(src_dict.indices)
__lowercase = len(UpperCamelCase_)
__lowercase = os.path.join(UpperCamelCase_, VOCAB_FILES_NAMES["vocab_file"])
print(F"""Generating {src_vocab_file} of {src_vocab_size} records""")
with open(UpperCamelCase_, "w", encoding="utf-8") as f:
f.write(json.dumps(UpperCamelCase_, ensure_ascii=UpperCamelCase_, indent=UpperCamelCase_))
# merges_file (bpecodes)
__lowercase = os.path.join(UpperCamelCase_, "bpecodes")
if not os.path.isfile(UpperCamelCase_):
raise ValueError(F"""path to the file {bpecodes_file} does not exist!""")
__lowercase = os.path.join(UpperCamelCase_, VOCAB_FILES_NAMES["merges_file"])
shutil.copyfile(UpperCamelCase_, UpperCamelCase_)
# model config
__lowercase = os.path.join(UpperCamelCase_, "config.json")
__lowercase = {
"activation_dropout": args["activation_dropout"],
"architectures": ["BioGptForCausalLM"],
"attention_probs_dropout_prob": args["attention_dropout"],
"bos_token_id": 0,
"eos_token_id": 2,
"hidden_act": args["activation_fn"],
"hidden_dropout_prob": args["dropout"],
"hidden_size": args["decoder_embed_dim"],
"initializer_range": 0.02,
"intermediate_size": args["decoder_ffn_embed_dim"],
"layer_norm_eps": 1E-12,
"layerdrop": args["decoder_layerdrop"],
"max_position_embeddings": args["max_target_positions"],
"model_type": "biogpt",
"num_attention_heads": args["decoder_attention_heads"],
"num_hidden_layers": args["decoder_layers"],
"pad_token_id": 1,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_decoder_input_output_embed"],
"vocab_size": src_vocab_size,
}
# good hparam defaults to start with
print(F"""Generating {biogpt_model_config_file}""")
with open(UpperCamelCase_, "w", encoding="utf-8") as f:
f.write(json.dumps(UpperCamelCase_, ensure_ascii=UpperCamelCase_, indent=UpperCamelCase_))
# tokenizer config
__lowercase = os.path.join(UpperCamelCase_, UpperCamelCase_)
__lowercase = {
"bos_token": "<s>",
"eos_token": "</s>",
"model_max_length": 1024,
"pad_token": "<pad>",
"special_tokens_map_file": None,
"tokenizer_class": "BioGptTokenizer",
"unk_token": "<unk>",
}
print(F"""Generating {biogpt_tokenizer_config_file}""")
with open(UpperCamelCase_, "w", encoding="utf-8") as f:
f.write(json.dumps(UpperCamelCase_, ensure_ascii=UpperCamelCase_, indent=UpperCamelCase_))
# model
__lowercase = chkpt["model"]
# remove unneeded keys
__lowercase = [
"decoder.version",
]
for k in ignore_keys:
model_state_dict.pop(UpperCamelCase_, UpperCamelCase_)
__lowercase = list(model_state_dict.keys())
for layer_name in layer_names:
if layer_name.endswith("output_projection.weight"):
__lowercase = model_state_dict.pop(UpperCamelCase_)
else:
__lowercase = model_state_dict.pop(UpperCamelCase_)
__lowercase = BioGptConfig.from_pretrained(UpperCamelCase_)
__lowercase = BioGptForCausalLM(UpperCamelCase_)
# check that it loads ok
model_new.load_state_dict(UpperCamelCase_)
# save
__lowercase = os.path.join(UpperCamelCase_, UpperCamelCase_)
print(F"""Generating {pytorch_weights_dump_path}""")
torch.save(UpperCamelCase_, UpperCamelCase_)
print("Conversion is done!")
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_a = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 17 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.0_5_4_5_7_1_8_1_7e-3_4  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def lowercase__ ( force: float , area: float , distance: float ) -> dict[str, float]:
    '''simple docstring'''
    if (force, area, distance).count(0 ) != 1:
        raise ValueError('One and only one argument must be 0' )
    if force < 0:
        raise ValueError('Magnitude of force can not be negative' )
    if distance < 0:
        raise ValueError('Distance can not be negative' )
    if area < 0:
        raise ValueError('Area can not be negative' )
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            2_40 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (2_40 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_40 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError('One and only one argument must be 0' )
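# Illustrative usage sketch (added for clarity, not part of the original file): the
# magnitudes below are arbitrary example values in SI units.
def _demo_casimir_force():  # hypothetical helper, for illustration only
    # solve for the attractive force between two 4 m^2 plates held 0.03 m apart
    result = lowercase__(force=0, area=4, distance=0.03)
    assert "force" in result and result["force"] > 0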
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 53 |
"""simple docstring"""
def sylvester( number ) -> int:
    '''simple docstring'''
    assert isinstance(number , int ), f'''The input value of [n={number}] is not an integer'''
    if number == 1:
        return 2
    elif number < 1:
        msg = f'''The input value of [n={number}] has to be > 0'''
        raise ValueError(msg )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
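# Illustrative usage sketch (added for clarity, not part of the original file): the
# sequence starts 2, 3, 7, 43, each term being the product of all previous terms plus one.
def _demo_sylvester():  # hypothetical helper, for illustration only
    assert [sylvester(n) for n in range(1, 5)] == [2, 3, 7, 43]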
if __name__ == "__main__":
print(f'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
| 53 | 1 |
'''simple docstring'''
def __magic_name__( separator, separated):
    joined = ''''''
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception('''join() accepts only strings to be joined''')
        joined += word_or_phrase + separator
    return joined.strip(separator)
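# Illustrative usage sketch (added for clarity, not part of the original file):
# mirrors str.join, except the separator is also stripped from both ends.
def _demo_join():  # hypothetical helper, for illustration only
    assert __magic_name__(" ", ["You", "are", "amazing!"]) == "You are amazing!"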
if __name__ == "__main__":
from doctest import testmod
testmod()
| 174 |
"""simple docstring"""
def is_arithmetic_series( series : list ) -> bool:
    if not isinstance(series , list ):
        raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' )
    if len(series ) == 0:
        raise ValueError('''Input list must be a non empty list''' )
    if len(series ) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series ) - 1 ):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean( series : list ) -> float:
    if not isinstance(series , list ):
        raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' )
    if len(series ) == 0:
        raise ValueError('''Input list must be a non empty list''' )
    answer = 0
    for val in series:
        answer += val
    return answer / len(series )
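# Illustrative usage sketch (added for clarity, not part of the original file): a
# series with a common difference of 2 passes the check; the mean is sum / length.
def _demo_arithmetic_series():  # hypothetical helper, for illustration only
    assert is_arithmetic_series([2, 4, 6]) is True
    assert arithmetic_mean([2, 4, 6]) == 4.0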
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 | 0 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
('beit', 'BeitFeatureExtractor'),
('chinese_clip', 'ChineseCLIPFeatureExtractor'),
('clap', 'ClapFeatureExtractor'),
('clip', 'CLIPFeatureExtractor'),
('clipseg', 'ViTFeatureExtractor'),
('conditional_detr', 'ConditionalDetrFeatureExtractor'),
('convnext', 'ConvNextFeatureExtractor'),
('cvt', 'ConvNextFeatureExtractor'),
('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
('data2vec-vision', 'BeitFeatureExtractor'),
('deformable_detr', 'DeformableDetrFeatureExtractor'),
('deit', 'DeiTFeatureExtractor'),
('detr', 'DetrFeatureExtractor'),
('dinat', 'ViTFeatureExtractor'),
('donut-swin', 'DonutFeatureExtractor'),
('dpt', 'DPTFeatureExtractor'),
('encodec', 'EncodecFeatureExtractor'),
('flava', 'FlavaFeatureExtractor'),
('glpn', 'GLPNFeatureExtractor'),
('groupvit', 'CLIPFeatureExtractor'),
('hubert', 'Wav2Vec2FeatureExtractor'),
('imagegpt', 'ImageGPTFeatureExtractor'),
('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
('levit', 'LevitFeatureExtractor'),
('maskformer', 'MaskFormerFeatureExtractor'),
('mctct', 'MCTCTFeatureExtractor'),
('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
('mobilevit', 'MobileViTFeatureExtractor'),
('nat', 'ViTFeatureExtractor'),
('owlvit', 'OwlViTFeatureExtractor'),
('perceiver', 'PerceiverFeatureExtractor'),
('poolformer', 'PoolFormerFeatureExtractor'),
('regnet', 'ConvNextFeatureExtractor'),
('resnet', 'ConvNextFeatureExtractor'),
('segformer', 'SegformerFeatureExtractor'),
('sew', 'Wav2Vec2FeatureExtractor'),
('sew-d', 'Wav2Vec2FeatureExtractor'),
('speech_to_text', 'Speech2TextFeatureExtractor'),
('speecht5', 'SpeechT5FeatureExtractor'),
('swiftformer', 'ViTFeatureExtractor'),
('swin', 'ViTFeatureExtractor'),
('swinv2', 'ViTFeatureExtractor'),
('table-transformer', 'DetrFeatureExtractor'),
('timesformer', 'VideoMAEFeatureExtractor'),
('tvlt', 'TvltFeatureExtractor'),
('unispeech', 'Wav2Vec2FeatureExtractor'),
('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
('van', 'ConvNextFeatureExtractor'),
('videomae', 'VideoMAEFeatureExtractor'),
('vilt', 'ViltFeatureExtractor'),
('vit', 'ViTFeatureExtractor'),
('vit_mae', 'ViTFeatureExtractor'),
('vit_msn', 'ViTFeatureExtractor'),
('wav2vec2', 'Wav2Vec2FeatureExtractor'),
('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
('wavlm', 'Wav2Vec2FeatureExtractor'),
('whisper', 'WhisperFeatureExtractor'),
('xclip', 'CLIPFeatureExtractor'),
('yolos', 'YolosFeatureExtractor'),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name (class_name :str ):
    '''simple docstring'''
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name )
            module = importlib.import_module(f'''.{module_name}''' , 'transformers.models' )
            try:
                return getattr(module , class_name )
            except AttributeError:
                continue
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor , '__name__' , None ) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('transformers' )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )
    return None
def get_feature_extractor_config (pretrained_model_name_or_path :Union[str, os.PathLike] , cache_dir :Optional[Union[str, os.PathLike]] = None , force_download :bool = False , resume_download :bool = False , proxies :Optional[Dict[str, str]] = None , use_auth_token :Optional[Union[bool, str]] = None , revision :Optional[str] = None , local_files_only :bool = False , **kwargs ):
    '''simple docstring'''
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , FEATURE_EXTRACTOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            'Could not locate the feature extractor configuration file, will try to use the model config instead.' )
        return {}
    with open(resolved_config_file , encoding='utf-8' ) as reader:
        return json.load(reader )
class AutoFeatureExtractor:
def __init__( self : Optional[int] ):
'''simple docstring'''
raise EnvironmentError(
'AutoFeatureExtractor is designed to be instantiated '
'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.' )
    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES )
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        '''simple docstring'''
        config = kwargs.pop('config' , None )
        trust_remote_code = kwargs.pop('trust_remote_code' , None )
        kwargs['_from_auto'] = True
        config_dict , _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path , **kwargs )
        feature_extractor_class = config_dict.get('feature_extractor_type' , None )
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
            feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config , PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs )
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config , 'feature_extractor_type' , None )
            if hasattr(config , 'auto_map' ) and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map['AutoFeatureExtractor']
        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class )
        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config ) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )
        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map , pretrained_model_name_or_path , **kwargs )
            kwargs.pop('code_revision' , None )
            if os.path.isdir(pretrained_model_name_or_path ):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict , **kwargs )
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict , **kwargs )
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config ) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config )]
            return feature_extractor_class.from_dict(config_dict , **kwargs )
        raise ValueError(
            F'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
            F'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
            F'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' )
    @staticmethod
    def register( config_class , feature_extractor_class ):
        '''simple docstring'''
        FEATURE_EXTRACTOR_MAPPING.register(config_class , feature_extractor_class )
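# Illustrative usage sketch (added for clarity, not part of the original module):
# resolving an extractor class by name walks the mapping above; the class name used
# here is only an example and requires the corresponding model module to be importable.
def _demo_feature_extractor_lookup():  # hypothetical helper, for illustration only
    cls = feature_extractor_class_from_name("Wav2Vec2FeatureExtractor")
    # the lookup returns the class when found, or None for unknown names
    assert cls is None or cls.__name__ == "Wav2Vec2FeatureExtractor"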
| 350 |
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class BartphoTokenizerTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        vocab = ['▁This', '▁is', '▁a', '▁t', 'est']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['monolingual_vocab_file'] )
        with open(self.monolingual_vocab_file , 'w' , encoding='utf-8' ) as fp:
            for token in vocab_tokens:
                fp.write(F'''{token} {vocab_tokens[token]}\n''' )
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return BartphoTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        input_text = 'This is a là test'
        output_text = 'This is a<unk><unk> test'
        return input_text, output_text
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map )
        text = 'This is a là test'
        bpe_tokens = '▁This ▁is ▁a ▁l à ▁t est'.split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
| 186 | 0 |
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.speecht5')
MAPPING_SPEECH_ENCODER_PRENET = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
MAPPING_TEXT_ENCODER_PRENET = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
MAPPING_SPEECH_DECODER_PRENET = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
MAPPING_TEXT_DECODER_PRENET = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
MAPPING_TEXT_DECODER_POSTNET = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
MAPPING_ENCODER = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
MAPPING_DECODER = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." )
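# Note (added for clarity, not part of the original script): `set_recursively` walks a
# dotted attribute path, so e.g. key "encoder.layer_norm" with weight_type "weight"
# copies `value` into hf_model.encoder.layer_norm.weight.data in place.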
def should_ignore( name , ignore_keys ):
    for key in ignore_keys:
        if key.endswith('''.*''' ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix , suffix = key.split('''.*.''' )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
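# Illustrative usage sketch (added for clarity, not part of the original script): the
# ignore patterns support a trailing ".*" prefix match and an infix ".*." match.
def _demo_should_ignore():  # hypothetical helper, for illustration only
    assert should_ignore("encoder.proj.weight", ["encoder.proj"])
    assert should_ignore("decoder.layers.3.norm_k.bias", ["decoder.layers.*.norm_k.bias"])
    assert not should_ignore("decoder.layers.3.fc1.weight", ["encoder.*"])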
def recursively_load_weights( fairseq_dict , hf_model , task ):
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}" )
    for name, value in fairseq_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(f"{name} was ignored" )
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_encoder , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix , suffix = key.split('''.*.''' )
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    elif "running_mean" in name:
                        weight_type = 'running_mean'
                    elif "running_var" in name:
                        weight_type = 'running_var'
                    elif "num_batches_tracked" in name:
                        weight_type = 'num_batches_tracked'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"Unused weights: {unused_weights}" )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_speechta_checkpoint( task , checkpoint_path , pytorch_dump_folder_path , config_path=None , vocab_path=None , repo_id=None , ):
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path )
    else:
        config = SpeechTaConfig()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config )
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config )
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config )
    else:
        raise ValueError(f"Unknown task name: {task}" )
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path , model_max_length=config.max_text_positions )
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken('''<mask>''' , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'''mask_token''': mask_token} )
        tokenizer.add_tokens(['''<ctc_blank>'''] )
    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
    processor.save_pretrained(pytorch_dump_folder_path )
    fairseq_checkpoint = torch.load(checkpoint_path )
    recursively_load_weights(fairseq_checkpoint['''model'''] , model , task )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('''Pushing to the hub...''' )
        processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
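# Illustrative invocation (added for clarity, not part of the original script); the
# script name and paths below are placeholders:
#
#   python convert_speecht5_checkpoint.py --task t2s \
#       --checkpoint_path ./speecht5_tts.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts_hf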
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--task',
default='s2t',
type=str,
help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
        '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
UpperCAmelCase_ = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 201 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ : int = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[int] = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Dict = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Any = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Tuple = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Tuple = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
lowercase__ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 224 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 163 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def onnx_compliancy( saved_model_path , strict , opset ):
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH , "utils" , "tf_ops" , "onnx.json" ) ) as f:
        onnx_opsets = json.load(f )["opsets"]
    for i in range(1 , opset + 1 ):
        onnx_ops.extend(onnx_opsets[str(i )] )
    with open(saved_model_path , "rb" ) as f:
        saved_model.ParseFromString(f.read() )
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node )
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def )
    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names )
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op )
    if strict and len(incompatible_ops ) > 0:
        raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + "\n".join(incompatible_ops ) )
    elif len(incompatible_ops ) > 0:
        print(f"""Found the following incompatible ops for the opset {opset}:""" )
        print(*incompatible_ops , sep="\n" )
    else:
        print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
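# Illustrative invocation (added for clarity, not part of the original script); the
# script name and saved-model path are placeholders:
#
#   python check_tf_ops.py --saved_model_path ./model/saved_model.pb --opset 12 --strict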
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 163 | 1 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = """
import os
"""
IMPORT_IN_FUNCTION = """
def foo():
import os
return False
"""
DEEPLY_NESTED_IMPORT = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
TOP_LEVEL_TRY_IMPORT = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
TRY_IMPORT_IN_FUNCTION = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
MULTIPLE_EXCEPTS_IMPORT = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
EXCEPT_AS_IMPORT = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
GENERIC_EXCEPT_IMPORT = """
import os
try:
import bar
except:
raise ValueError()
"""
MULTILINE_TRY_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
MULTILINE_BOTH_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , CASES )
def test_import_parsing( tmp_path , case ):
    """simple docstring"""
    tmp_file_path = os.path.join(tmp_path , '''test_file.py''' )
    with open(tmp_file_path , '''w''' ) as _tmp_file:
        _tmp_file.write(case )
    parsed_imports = get_imports(tmp_file_path )
    assert parsed_imports == ["os"]
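# Illustrative usage sketch (added for clarity, not part of the original test file):
# `get_imports` statically collects imports while skipping those guarded by
# try/except blocks, which is why every case above parses to ["os"].
def _demo_get_imports(tmp_path):  # hypothetical pytest-style helper, for illustration only
    path = os.path.join(tmp_path, "demo_file.py")
    with open(path, "w") as fh:
        fh.write(TOP_LEVEL_TRY_IMPORT)
    assert get_imports(path) == ["os"]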
| 14 |
"""simple docstring"""
__lowercase = {
"""Pillow""": """Pillow<10.0.0""",
"""accelerate""": """accelerate>=0.20.3""",
"""av""": """av==9.2.0""",
"""beautifulsoup4""": """beautifulsoup4""",
"""black""": """black~=23.1""",
"""codecarbon""": """codecarbon==1.2.0""",
"""cookiecutter""": """cookiecutter==1.7.3""",
"""dataclasses""": """dataclasses""",
"""datasets""": """datasets!=2.5.0""",
"""decord""": """decord==0.6.0""",
"""deepspeed""": """deepspeed>=0.9.3""",
"""diffusers""": """diffusers""",
"""dill""": """dill<0.3.5""",
"""evaluate""": """evaluate>=0.2.0""",
"""fairscale""": """fairscale>0.3""",
"""faiss-cpu""": """faiss-cpu""",
"""fastapi""": """fastapi""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1,<=0.7.0""",
"""ftfy""": """ftfy""",
"""fugashi""": """fugashi>=1.0""",
"""GitPython""": """GitPython<3.1.19""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
"""importlib_metadata""": """importlib_metadata""",
"""ipadic""": """ipadic>=1.0.0,<2.0""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
"""jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
"""jieba""": """jieba""",
"""kenlm""": """kenlm""",
"""keras-nlp""": """keras-nlp>=0.3.1""",
"""librosa""": """librosa""",
"""nltk""": """nltk""",
"""natten""": """natten>=0.14.6""",
"""numpy""": """numpy>=1.17""",
"""onnxconverter-common""": """onnxconverter-common""",
"""onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
"""onnxruntime""": """onnxruntime>=1.4.0""",
"""opencv-python""": """opencv-python""",
"""optuna""": """optuna""",
"""optax""": """optax>=0.0.8,<=0.1.4""",
"""packaging""": """packaging>=20.0""",
"""parameterized""": """parameterized""",
"""phonemizer""": """phonemizer""",
"""protobuf""": """protobuf""",
"""psutil""": """psutil""",
"""pyyaml""": """pyyaml>=5.1""",
"""pydantic""": """pydantic<2""",
"""pytest""": """pytest>=7.2.0""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""python""": """python>=3.8.0""",
"""ray[tune]""": """ray[tune]""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
"""rjieba""": """rjieba""",
"""rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
"""ruff""": """ruff>=0.0.241,<=0.0.259""",
"""sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
"""sacremoses""": """sacremoses""",
"""safetensors""": """safetensors>=0.3.1""",
"""sagemaker""": """sagemaker>=2.31.0""",
"""scikit-learn""": """scikit-learn""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""sigopt""": """sigopt""",
"""starlette""": """starlette""",
"""sudachipy""": """sudachipy>=0.6.6""",
"""sudachidict_core""": """sudachidict_core>=20220729""",
"""tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
"""tensorflow""": """tensorflow>=2.6,<2.14""",
"""tensorflow-text""": """tensorflow-text<2.14""",
"""tf2onnx""": """tf2onnx""",
"""timeout-decorator""": """timeout-decorator""",
"""timm""": """timm""",
"""tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
"""torch""": """torch>=1.9,!=1.12.0""",
"""torchaudio""": """torchaudio""",
"""torchvision""": """torchvision""",
"""pyctcdecode""": """pyctcdecode>=0.4.0""",
"""tqdm""": """tqdm>=4.27""",
"""unidic""": """unidic>=1.0.2""",
"""unidic_lite""": """unidic_lite>=1.0.7""",
"""urllib3""": """urllib3<2.0.0""",
"""uvicorn""": """uvicorn""",
}
| 40 | 0 |
"""simple docstring"""
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=2 , seq_length=56 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=2 , intermediate_size=7 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_choices=4 , attention_type="block_sparse" , use_bias=True , rescale_embeddings=False , block_size=2 , num_random_blocks=3 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = BigBirdConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config, input_ids, token_type_ids, attention_mask = self.prepare_config_and_inputs()
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'attention_mask': attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
    test_attn_probs = False
    test_mismatched_shapes = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained( self ):
'''simple docstring'''
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init( self ):
'''simple docstring'''
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init( self ):
'''simple docstring'''
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output( self ):
'''simple docstring'''
super().test_hidden_states_output()
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('google/bigbird-roberta-base' )
            self.assertIsNotNone(model )
    def test_attention_outputs( self ):
'''simple docstring'''
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation( self ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def model_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model(input_ids=input_ids , attention_mask=attention_mask , **kwargs )
                with self.subTest('JIT Enabled' ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def check_pt_flax_outputs( self , fx_outputs , pt_outputs , model_class , tol=1E-5 , name="outputs" , attributes=None ):
        '''simple docstring'''
        if name.startswith('outputs.attentions' ):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs , pt_outputs , model_class , tol , name , attributes )
| 365 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : str = logging.get_logger(__name__)
UpperCamelCase : Dict = {
"microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig ( PretrainedConfig ):
    model_type = "wavlm"
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.0_2 , layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , num_buckets=320 , max_bucket_distance=800 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.0_5 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , num_codevectors_per_group=320 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=100 , codevector_dim=256 , proj_codevector_dim=256 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , tdnn_dim=(512, 512, 512, 512, 1500) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=512 , num_ctc_classes=80 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 , num_adapter_layers=3 , output_hidden_size=None , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
__UpperCamelCase = hidden_size
__UpperCamelCase = feat_extract_norm
__UpperCamelCase = feat_extract_activation
__UpperCamelCase = list(__UpperCAmelCase )
__UpperCamelCase = list(__UpperCAmelCase )
__UpperCamelCase = list(__UpperCAmelCase )
__UpperCamelCase = conv_bias
__UpperCamelCase = num_buckets
__UpperCamelCase = max_bucket_distance
__UpperCamelCase = num_conv_pos_embeddings
__UpperCamelCase = num_conv_pos_embedding_groups
__UpperCamelCase = len(self.conv_dim )
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = num_attention_heads
__UpperCamelCase = hidden_dropout
__UpperCamelCase = attention_dropout
__UpperCamelCase = activation_dropout
__UpperCamelCase = feat_proj_dropout
__UpperCamelCase = final_dropout
__UpperCamelCase = layerdrop
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = initializer_range
__UpperCamelCase = num_ctc_classes
__UpperCamelCase = vocab_size
__UpperCamelCase = do_stable_layer_norm
__UpperCamelCase = use_weighted_layer_sum
__UpperCamelCase = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCamelCase = apply_spec_augment
__UpperCamelCase = mask_time_prob
__UpperCamelCase = mask_time_length
__UpperCamelCase = mask_time_min_masks
__UpperCamelCase = mask_feature_prob
__UpperCamelCase = mask_feature_length
# parameters for pretraining with codevector quantized representations
__UpperCamelCase = num_codevectors_per_group
__UpperCamelCase = num_codevector_groups
__UpperCamelCase = contrastive_logits_temperature
__UpperCamelCase = num_negatives
__UpperCamelCase = codevector_dim
__UpperCamelCase = proj_codevector_dim
__UpperCamelCase = diversity_loss_weight
# ctc loss
__UpperCamelCase = ctc_loss_reduction
__UpperCamelCase = ctc_zero_infinity
# adapter
__UpperCamelCase = add_adapter
__UpperCamelCase = adapter_kernel_size
__UpperCamelCase = adapter_stride
__UpperCamelCase = num_adapter_layers
__UpperCamelCase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__UpperCamelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__UpperCamelCase = list(__UpperCAmelCase )
__UpperCamelCase = list(__UpperCAmelCase )
__UpperCamelCase = list(__UpperCAmelCase )
__UpperCamelCase = xvector_output_dim
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
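# Usage sketch for the configuration above. The enclosing class statement sits
# above this excerpt; the signature matches transformers' WavLMConfig, so the
# class name below is an assumption rather than something this file states.
#
#     from transformers import WavLMConfig
#
#     config = WavLMConfig(hidden_size=256, num_hidden_layers=4)
#     config.num_feat_extract_layers   # 7, i.e. len(conv_dim)
#     config.inputs_to_logits_ratio    # 320 == 5 * 2**6, the product of conv_stride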
| 263 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""ViTFeatureExtractor"""]
lowerCamelCase__ = ["""ViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
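# Sketch of the consumer-side effect of the lazy module above: each submodule is
# imported only on first attribute access (paths assume the transformers layout).
#
#     import transformers.models.vit as vit
#
#     vit.ViTConfig   # resolves configuration_vit on first access
#     vit.ViTModel    # resolves modeling_vit (and its torch dependency) here, not at import time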
| 212 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
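# Usage sketch for the ONNX config above: a tiny GPT-J config plus GPT-2's BPE
# tokenizer (which GPT-J reuses). Downloading the tokenizer assumes network
# access; the small config values are illustrative only.
#
#     from transformers import AutoTokenizer
#
#     onnx_config = GPTJOnnxConfig(GPTJConfig(n_layer=2, n_embd=64, n_head=4), use_past=True)
#     tokenizer = AutoTokenizer.from_pretrained("gpt2")
#     dummy = onnx_config.generate_dummy_inputs(
#         tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
#     )
#     list(dummy)  # ['input_ids', 'past_key_values', 'attention_mask']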
| 212 | 1 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule() -> None:
'''simple docstring'''
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin() -> None:
'''simple docstring'''
assert _test_patching.open is open
    mock = "__test_patch_submodule_builtin_mock__"
# _test_patching has "open" in its globals
assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing() -> None:
'''simple docstring'''
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
pass
def test_patch_submodule_missing_builtin() -> None:
'''simple docstring'''
    mock = "__test_patch_submodule_missing_builtin_mock__"
# _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
assert _test_patching.len is mock
assert _test_patching.len is len
def test_patch_submodule_start_and_stop() -> None:
'''simple docstring'''
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive() -> None:
'''simple docstring'''
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist() -> None:
'''simple docstring'''
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
pass
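# Conceptual sketch of what patch_submodule automates for a top-level attribute:
# save, swap, and restore the attribute on the target module. The real helper
# also resolves dotted paths such as "os.path.join" by wrapping each parent
# module in a _PatchedModuleObj, which the tests above verify; this sketch is
# an illustration, not the library implementation.
def _manual_patch_sketch():
    original = _test_patching.open  # "open" is in _test_patching's globals
    _test_patching.open = "__manual_mock__"
    try:
        assert _test_patching.open == "__manual_mock__"
    finally:
        _test_patching.open = original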
| 368 |
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
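# A compact equivalent of solution(), shown for comparison; defined after the
# __main__ block so it does not change the script's behaviour.
def solution_compact(power: int = 1000) -> int:
    """Same digit sum as solution(), written as a one-liner."""
    return sum(int(digit) for digit in str(2**power))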
| 304 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
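# Usage sketch: pairing the config above with an explicit backbone. ResNetConfig
# accepting out_features is an assumption about the installed transformers version.
#
#     from transformers import ResNetConfig
#
#     backbone = ResNetConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
#     config = UperNetConfig(backbone_config=backbone, use_auxiliary_head=False)
#     config.to_dict()["backbone_config"]["model_type"]  # "resnet"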
| 89 |
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
UpperCAmelCase__ = logging.get_logger(__name__)
enable_full_determinism()
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Optional[int] = UNetaDModel
_snake_case : List[str] = 'sample'
@property
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = (32, 32)
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase_ ( self : List[Any] ):
return (3, 32, 32)
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return (3, 32, 32)
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = {
"""block_out_channels""": (32, 64),
"""down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
"""up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
"""attention_head_dim""": 3,
"""out_channels""": 3,
"""in_channels""": 3,
"""layers_per_block""": 2,
"""sample_size""": 32,
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : int = UNetaDModel
_snake_case : Optional[Any] = 'sample'
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = 4
_UpperCAmelCase = 4
_UpperCAmelCase = (32, 32)
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor([10] ).to(__lowerCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return (4, 32, 32)
@property
def lowerCAmelCase_ ( self : Dict ):
return (4, 32, 32)
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = {
"""sample_size""": 32,
"""in_channels""": 4,
"""out_channels""": 4,
"""layers_per_block""": 2,
"""block_out_channels""": (32, 64),
"""attention_head_dim""": 32,
"""down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
"""up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(__lowerCAmelCase )
_UpperCAmelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase )
model.to(__lowerCAmelCase )
_UpperCAmelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def lowerCAmelCase_ ( self : str ):
# by defautl model loading will use accelerate as `low_cpu_mem_usage=True`
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase )
model_accelerate.to(__lowerCAmelCase )
model_accelerate.eval()
_UpperCAmelCase = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
_UpperCAmelCase = noise.to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase )
_UpperCAmelCase = model_accelerate(__lowerCAmelCase , __lowerCAmelCase )["""sample"""]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained(
"""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase , low_cpu_mem_usage=__lowerCAmelCase )
model_normal_load.to(__lowerCAmelCase )
model_normal_load.eval()
_UpperCAmelCase = model_normal_load(__lowerCAmelCase , __lowerCAmelCase )["""sample"""]
assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 )
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" )
model.eval()
model.to(__lowerCAmelCase )
_UpperCAmelCase = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
_UpperCAmelCase = noise.to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase )
with torch.no_grad():
_UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample
_UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-3 ) )
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_snake_case : Optional[Any] = UNetaDModel
_snake_case : str = 'sample'
@property
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str=(32, 32) ):
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=__lowerCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase_ ( self : Any ):
return (3, 32, 32)
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return (3, 32, 32)
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = {
"""block_out_channels""": [32, 64, 64, 64],
"""in_channels""": 3,
"""layers_per_block""": 1,
"""out_channels""": 3,
"""time_embedding_type""": """fourier""",
"""norm_eps""": 1e-6,
"""mid_block_scale_factor""": math.sqrt(2.0 ),
"""norm_num_groups""": None,
"""down_block_types""": [
"""SkipDownBlock2D""",
"""AttnSkipDownBlock2D""",
"""SkipDownBlock2D""",
"""SkipDownBlock2D""",
],
"""up_block_types""": [
"""SkipUpBlock2D""",
"""SkipUpBlock2D""",
"""AttnSkipUpBlock2D""",
"""SkipUpBlock2D""",
],
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(__lowerCAmelCase )
_UpperCAmelCase = self.dummy_input
_UpperCAmelCase = floats_tensor((4, 3) + (256, 256) ).to(__lowerCAmelCase )
_UpperCAmelCase = noise
_UpperCAmelCase = model(**__lowerCAmelCase )
assert image is not None, "Make sure output is not None"
@slow
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" )
model.to(__lowerCAmelCase )
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = (256, 256)
_UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase )
with torch.no_grad():
_UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample
_UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" )
model.to(__lowerCAmelCase )
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = (32, 32)
_UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
_UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__lowerCAmelCase )
with torch.no_grad():
_UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample
_UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) )
def lowerCAmelCase_ ( self : List[str] ):
# not required for this model
pass
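# Standalone sketch of the dummy-input pattern these tests repeat: build a tiny
# UNet with the same init_dict as the first test class above, feed a random
# sample plus timestep, and read .sample. UNetaDModel is this file's imported
# alias for the 2D UNet; the helper is not part of the test suite.
def _dummy_forward_sketch():
    unet = UNetaDModel(
        block_out_channels=(32, 64),
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        attention_head_dim=3,
        out_channels=3,
        in_channels=3,
        layers_per_block=2,
        sample_size=32,
    ).to(torch_device)
    noise = floats_tensor((4, 3, 32, 32)).to(torch_device)
    time_step = torch.tensor([10]).to(torch_device)
    return unet(noise, time_step).sample  # same spatial shape as the input: (4, 3, 32, 32)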
| 289 | 0 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
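# Quick check of the 2x2 branch: [[4, 7], [2, 6]] has determinant 10, so the
# inverse is [[0.6, -0.7], [-0.2, 0.4]].
#
#     inverse_of_matrix([[4.0, 7.0], [2.0, 6.0]])
#     # [[0.6, -0.7], [-0.2, 0.4]]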
| 351 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
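# Usage sketch (downloads the camembert-base checkpoint, so network access is
# assumed; the ids are arbitrary placeholders).
#
#     tokenizer = CamembertTokenizerFast.from_pretrained("camembert-base")
#     tokenizer.build_inputs_with_special_tokens([10, 11], [12])
#     # -> [cls, 10, 11, sep, sep, 12, sep], the pair layout built above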
| 334 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
| 321 |
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validate that the payload decodes as an image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
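# Invocation sketch (the script filename is an assumption; the flags are the
# ones defined in parse_args above, and the clip-retrieval service must be reachable):
#
#     python retrieve.py --class_prompt "photo of a dog" --class_data_dir dog_images --num_class_images 200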
| 321 | 1 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = BertTokenizer
lowercase = BertTokenizerFast
lowercase = True
lowercase = True
lowercase = filter_non_english
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
super().setUp()
lowerCAmelCase__ : Union[str, Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
lowerCAmelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def _lowerCamelCase ( self : Union[str, Any] , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = 'UNwant\u00E9d,running'
lowerCAmelCase__ : int = 'unwanted, running'
return input_text, output_text
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : str = self.tokenizer_class(self.vocab_file )
lowerCAmelCase__ : Optional[int] = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(a , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , [9, 6, 7, 12, 10, 11] )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCAmelCase__ : Optional[Any] = self.get_tokenizer()
lowerCAmelCase__ : List[str] = self.get_rust_tokenizer()
lowerCAmelCase__ : Union[str, Any] = 'UNwant\u00E9d,running'
lowerCAmelCase__ : List[Any] = tokenizer.tokenize(a )
lowerCAmelCase__ : int = rust_tokenizer.tokenize(a )
self.assertListEqual(a , a )
lowerCAmelCase__ : Optional[int] = tokenizer.encode(a , add_special_tokens=a )
lowerCAmelCase__ : Dict = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
lowerCAmelCase__ : str = self.get_rust_tokenizer()
lowerCAmelCase__ : Optional[Any] = tokenizer.encode(a )
lowerCAmelCase__ : str = rust_tokenizer.encode(a )
self.assertListEqual(a , a )
# With lower casing
lowerCAmelCase__ : int = self.get_tokenizer(do_lower_case=a )
lowerCAmelCase__ : List[str] = self.get_rust_tokenizer(do_lower_case=a )
lowerCAmelCase__ : Dict = 'UNwant\u00E9d,running'
lowerCAmelCase__ : Dict = tokenizer.tokenize(a )
lowerCAmelCase__ : Optional[int] = rust_tokenizer.tokenize(a )
self.assertListEqual(a , a )
lowerCAmelCase__ : List[Any] = tokenizer.encode(a , add_special_tokens=a )
lowerCAmelCase__ : Optional[int] = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
lowerCAmelCase__ : str = self.get_rust_tokenizer()
lowerCAmelCase__ : Any = tokenizer.encode(a )
lowerCAmelCase__ : Optional[Any] = rust_tokenizer.encode(a )
self.assertListEqual(a , a )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = BasicTokenizer(do_lower_case=a )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = BasicTokenizer(do_lower_case=a , strip_accents=a )
self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = BasicTokenizer(do_lower_case=a , strip_accents=a )
self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : str = BasicTokenizer(do_lower_case=a )
self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : str = BasicTokenizer(do_lower_case=a )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = BasicTokenizer(do_lower_case=a , strip_accents=a )
self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = BasicTokenizer(do_lower_case=a , strip_accents=a )
self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Any = BasicTokenizer(do_lower_case=a , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = BasicTokenizer()
lowerCAmelCase__ : Tuple = 'a\n\'ll !!to?\'d of, can\'t.'
lowerCAmelCase__ : Optional[Any] = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
self.assertListEqual(tokenizer.tokenize(a ) , a )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
lowerCAmelCase__ : Dict = {}
for i, token in enumerate(a ):
lowerCAmelCase__ : Union[str, Any] = i
lowerCAmelCase__ : Optional[Any] = WordpieceTokenizer(vocab=a , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.get_tokenizer()
lowerCAmelCase__ : Union[str, Any] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(a ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
self.assertListEqual(
[rust_tokenizer.tokenize(a ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.tokenizer_class.from_pretrained('bert-base-uncased' )
lowerCAmelCase__ : List[Any] = tokenizer.encode('sequence builders' , add_special_tokens=a )
lowerCAmelCase__ : Optional[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=a )
lowerCAmelCase__ : List[str] = tokenizer.build_inputs_with_special_tokens(a )
lowerCAmelCase__ : Dict = tokenizer.build_inputs_with_special_tokens(a , a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase__ : Tuple = self.rust_tokenizer_class.from_pretrained(a , **a )
            lowerCAmelCase__ : Optional[int] = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
lowerCAmelCase__ : List[Any] = tokenizer_r.encode_plus(
a , return_attention_mask=a , return_token_type_ids=a , return_offsets_mapping=a , add_special_tokens=a , )
lowerCAmelCase__ : Tuple = tokenizer_r.do_lower_case if hasattr(a , 'do_lower_case' ) else False
lowerCAmelCase__ : Optional[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
                ((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
        lowerCAmelCase__ : Optional[Any] = ['的', '人', '有']
lowerCAmelCase__ : Optional[Any] = ''.join(a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Union[str, Any] = self.tokenizer_class.from_pretrained(a , **a )
lowerCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(a , **a )
lowerCAmelCase__ : List[str] = tokenizer_p.encode(a , add_special_tokens=a )
lowerCAmelCase__ : Dict = tokenizer_r.encode(a , add_special_tokens=a )
lowerCAmelCase__ : str = tokenizer_r.convert_ids_to_tokens(a )
lowerCAmelCase__ : Tuple = tokenizer_p.convert_ids_to_tokens(a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(a , a )
self.assertListEqual(a , a )
lowerCAmelCase__ : Any = False
lowerCAmelCase__ : Any = self.rust_tokenizer_class.from_pretrained(a , **a )
lowerCAmelCase__ : Any = self.tokenizer_class.from_pretrained(a , **a )
lowerCAmelCase__ : int = tokenizer_r.encode(a , add_special_tokens=a )
lowerCAmelCase__ : Optional[Any] = tokenizer_p.encode(a , add_special_tokens=a )
lowerCAmelCase__ : Tuple = tokenizer_r.convert_ids_to_tokens(a )
lowerCAmelCase__ : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(a )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCAmelCase__ : Optional[int] = [
f'''##{token}''' if idx != 0 else token for idx, token in enumerate(a )
]
self.assertListEqual(a , a )
self.assertListEqual(a , a )
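# Standalone sketch of the greedy longest-match-first lookup that the
# WordpieceTokenizer tests above exercise; the vocab and inputs mirror those tests.
def _wordpiece_sketch():
    vocab = {"un": 0, "##want": 1, "##ed": 2, "runn": 3, "##ing": 4}
    wordpiece = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
    assert wordpiece.tokenize("unwanted running") == ["un", "##want", "##ed", "runn", "##ing"]
    assert wordpiece.tokenize("unwantedX running") == ["[UNK]", "runn", "##ing"]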
| 353 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 307 | 0 |
'''simple docstring'''
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir,
    model_name,
    bs=8,
    max_source_length=1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
):
    """Run evaluation on part of the dataset and save the intermediate results for this rank."""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.", default="sshleifer/distilbart-xsum-12-3"
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics: Dict = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
        else:
            shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> list:
    """Concatenate the per-rank result lists into one list, sorted by example id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> list:
    """Wait (up to `timeout` seconds) until every rank has written its rank_*.json file, then load them all."""
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    raise TimeoutError("Rank 0 gave up on waiting for other processes")
if __name__ == "__main__":
    run_generate()
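# A minimal example invocation, e.g. for machine translation (the paths, dataset layout,
# and GPU count are assumptions, not part of the script). The script is meant to be
# launched through torch.distributed, which supplies --local_rank, so that each rank
# evaluates its own shard and writes a rank_*.json file:
#
#   python -m torch.distributed.launch --nproc_per_node=8 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 \
#       --save_dir xsum_generations \
#       --data_dir xsum \
#       --fp16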
"""simple docstring"""
import argparse
import json
import subprocess
def UpperCAmelCase__ ( lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase = []
lowercase = (
f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
""" https://api.github.com/repos/huggingface/transformers/actions/runners"""
)
lowercase = subprocess.run(lowerCAmelCase__ , shell=lowerCAmelCase__ , stdout=subprocess.PIPE )
lowercase = output.stdout.decode("""utf-8""" )
lowercase = json.loads(lowerCAmelCase__ )
lowercase = status["""runners"""]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(lowerCAmelCase__ )
# save the result so we can report them on Slack
with open("""offline_runners.txt""" , """w""" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) )
if len(lowerCAmelCase__ ) > 0:
lowercase = """\n""".join([x["""name"""] for x in offline_runners] )
raise ValueError(f'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[int] ) -> Tuple:
'''simple docstring'''
return values.split(""",""" )
__lowerCAmelCase : str =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
__lowerCAmelCase : str =parser.parse_args()
get_runner_status(args.target_runners, args.token)
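# Example invocation (the script name, runner names, and token are placeholders):
#   python check_self_hosted_runner.py --target_runners gpu-ci-runner-1,gpu-ci-runner-2 --token "$GITHUB_TOKEN"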
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # The Flax block-sparse attention returns `attention_probs = None`, so PT/Flax
        # attention outputs cannot be compared and are skipped here.
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
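# The @slow tests above are skipped by default; in the transformers test suite they only
# run when the RUN_SLOW environment variable is set. Example (the file path is the
# conventional location for this test module):
#   RUN_SLOW=1 pytest tests/models/big_bird/test_modeling_flax_big_bird.py -k "jit or pretrained"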
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    """Ensure FeaturesManager.determine_framework picks the right backend."""

    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def __lowercase ( _a , _a , _a = "x" , _a = 10**-10 , _a = 1 , ):
snake_case_ : Any = symbols(_a )
snake_case_ : int = lambdify(_a , _a )
snake_case_ : Optional[Any] = lambdify(_a , diff(_a , _a ) )
snake_case_ : Optional[Any] = starting_point
while True:
if diff_function(_a ) != 0:
snake_case_ : Optional[int] = prev_guess - multiplicity * func(_a ) / diff_function(
_a )
else:
raise ZeroDivisionError('''Could not find root''' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
snake_case_ : int = next_guess
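# The loop above implements the modified Newton iteration
#   x_{n+1} = x_n - multiplicity * f(x_n) / f'(x_n)
# which keeps quadratic convergence near a root whose multiplicity is known;
# plain Newton (multiplicity=1) only converges linearly on multiple roots.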
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(f'The root of x**4 - 5 = 0 is {newton_raphson("x**4 - 5", 0.4 + 5j)}')
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f'{newton_raphson("exp(x) - 1", 10, precision=0.005)}',
)
# Find root of cos(x)
print(f'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
"""simple docstring"""
import argparse
import os
import re
import packaging.version
_a : Optional[Any] = 'examples/'
_a : int = {
'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
_a : Any = {
'init': 'src/transformers/__init__.py',
'setup': 'setup.py',
}
_a : List[str] = 'README.md'
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : Any ,_lowerCamelCase : Tuple ) -> Optional[Any]:
with open(_lowerCamelCase ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
_lowerCAmelCase : str = f.read()
_lowerCAmelCase : List[Any] = REPLACE_PATTERNS[pattern]
_lowerCAmelCase : Union[str, Any] = replace.replace("""VERSION""" ,_lowerCamelCase )
_lowerCAmelCase : int = re_pattern.sub(_lowerCamelCase ,_lowerCamelCase )
with open(_lowerCamelCase ,"""w""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
f.write(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> Dict:
for folder, directories, fnames in os.walk(_lowerCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(_lowerCamelCase ,_lowerCamelCase ) ,_lowerCamelCase ,pattern="""examples""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : int=False ) -> Tuple:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
if not patch:
update_version_in_examples(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
_lowerCAmelCase : Any = """๐ค Transformers currently provides the following architectures"""
_lowerCAmelCase : List[Any] = """1. Want to contribute a new model?"""
with open(_lowerCamelCase ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
_lowerCAmelCase : Optional[int] = f.readlines()
# Find the start of the list.
_lowerCAmelCase : int = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_lowerCAmelCase : Dict = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
_lowerCAmelCase : str = lines[index].replace(
"""https://huggingface.co/docs/transformers/main/model_doc""" ,"""https://huggingface.co/docs/transformers/model_doc""" ,)
index += 1
with open(_lowerCamelCase ,"""w""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
f.writelines(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( ) -> int:
with open(REPLACE_FILES["""init"""] ,"""r""" ) as f:
_lowerCAmelCase : Optional[int] = f.read()
_lowerCAmelCase : List[Any] = REPLACE_PATTERNS["""init"""][0].search(_lowerCamelCase ).groups()[0]
return packaging.version.parse(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple=False ) -> Tuple:
_lowerCAmelCase : List[Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
_lowerCAmelCase : List[str] = default_version.base_version
elif patch:
_lowerCAmelCase : int = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
else:
_lowerCAmelCase : Tuple = f"{default_version.major}.{default_version.minor + 1}.0"
# Now let's ask nicely if that's the right one.
_lowerCAmelCase : Any = input(f"Which version are you releasing? [{default_version}]" )
if len(_lowerCamelCase ) == 0:
_lowerCAmelCase : Optional[Any] = default_version
print(f"Updating version to {version}." )
global_version_update(_lowerCamelCase ,patch=_lowerCamelCase )
if not patch:
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
_lowerCAmelCase : Dict = get_version()
_lowerCAmelCase : List[Any] = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
_lowerCAmelCase : Optional[int] = current_version.base_version
# Check with the user we got that right.
_lowerCAmelCase : int = input(f"Which version are we developing now? [{dev_version}]" )
if len(_lowerCamelCase ) == 0:
_lowerCAmelCase : List[str] = dev_version
print(f"Updating version to {version}." )
global_version_update(_lowerCamelCase )
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
if __name__ == "__main__":
_a : Any = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
_a : Any = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
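# Typical invocations (run from the repository root, which the relative paths above assume):
#   python utils/release.py                 # prepare a minor release (bumps to X.Y+1.0)
#   python utils/release.py --patch         # prepare a patch release (bumps to X.Y.Z+1)
#   python utils/release.py --post_release  # move the branch back to an X.Y+1.0.dev0 version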
"""simple docstring"""
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ,_lowerCamelCase : Any ,_lowerCamelCase : Optional[Any] ) -> str:
_lowerCAmelCase : str = AutoConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : int = FlaxAutoModelForSeqaSeqLM.from_config(config=_lowerCamelCase )
_lowerCAmelCase : Any = checkpoints.load_tax_checkpoint(_lowerCamelCase )
_lowerCAmelCase : Tuple = """wi_0""" in tax_model["""target"""]["""encoder"""]["""layers_0"""]["""mlp"""]
if config.model_type == "t5":
_lowerCAmelCase : Tuple = """SelfAttention"""
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_lowerCAmelCase : Optional[Any] = """LocalSelfAttention"""
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCAmelCase : Union[str, Any] = """TransientGlobalSelfAttention"""
else:
raise ValueError(
"""Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"""
""" attribute with a value from ['local', 'transient-global].""" )
# Encoder
for layer_index in range(config.num_layers ):
_lowerCAmelCase : Tuple = f"layers_{str(_lowerCamelCase )}"
# Self-Attention
_lowerCAmelCase : List[str] = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""key"""]["""kernel"""]
_lowerCAmelCase : str = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""out"""]["""kernel"""]
_lowerCAmelCase : str = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""query"""]["""kernel"""]
_lowerCAmelCase : Any = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""value"""]["""kernel"""]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCAmelCase : Dict = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""T5LayerNorm_0"""]["""scale"""]
# Layer Normalization
_lowerCAmelCase : Any = tax_model["""target"""]["""encoder"""][layer_name]["""pre_attention_layer_norm"""]["""scale"""]
if split_mlp_wi:
_lowerCAmelCase : List[str] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
_lowerCAmelCase : int = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
_lowerCAmelCase : List[str] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
_lowerCAmelCase : Tuple = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
_lowerCAmelCase : Tuple = tax_model["""target"""]["""encoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
_lowerCAmelCase : Any = flax_model.params["""encoder"""]["""block"""][str(_lowerCamelCase )]["""layer"""]
_lowerCAmelCase : Any = tax_attention_key
_lowerCAmelCase : str = tax_attention_out
_lowerCAmelCase : Union[str, Any] = tax_attention_query
_lowerCAmelCase : Optional[Any] = tax_attention_value
_lowerCAmelCase : List[str] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCAmelCase : Any = tax_global_layer_norm
if split_mlp_wi:
_lowerCAmelCase : Dict = tax_mlp_wi_a
_lowerCAmelCase : List[Any] = tax_mlp_wi_a
else:
_lowerCAmelCase : List[str] = tax_mlp_wi
_lowerCAmelCase : str = tax_mlp_wo
_lowerCAmelCase : Optional[Any] = tax_mlp_layer_norm
_lowerCAmelCase : Any = flax_model_encoder_layer_block
# Only for layer 0:
_lowerCAmelCase : Union[str, Any] = tax_model["""target"""]["""encoder"""]["""relpos_bias"""]["""rel_embedding"""].T
_lowerCAmelCase : Optional[Any] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCAmelCase : List[str] = tax_model["""target"""]["""encoder"""]["""side_relpos_bias"""]["""rel_embedding"""].T
_lowerCAmelCase : Optional[int] = tax_encoder_global_rel_embedding
# Assigning
_lowerCAmelCase : Any = tax_model["""target"""]["""encoder"""]["""encoder_norm"""]["""scale"""]
_lowerCAmelCase : Any = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
_lowerCAmelCase : Optional[int] = f"layers_{str(_lowerCamelCase )}"
# Self-Attention
_lowerCAmelCase : List[str] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""key"""]["""kernel"""]
_lowerCAmelCase : int = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""out"""]["""kernel"""]
_lowerCAmelCase : List[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""query"""]["""kernel"""]
_lowerCAmelCase : str = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""value"""]["""kernel"""]
# Layer Normalization
_lowerCAmelCase : Optional[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""pre_self_attention_layer_norm"""][
"""scale"""
]
# Encoder-Decoder-Attention
_lowerCAmelCase : List[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""encoder_decoder_attention"""]
_lowerCAmelCase : List[str] = tax_enc_dec_attention_module["""key"""]["""kernel"""]
_lowerCAmelCase : List[Any] = tax_enc_dec_attention_module["""out"""]["""kernel"""]
_lowerCAmelCase : List[str] = tax_enc_dec_attention_module["""query"""]["""kernel"""]
_lowerCAmelCase : Dict = tax_enc_dec_attention_module["""value"""]["""kernel"""]
# Layer Normalization
_lowerCAmelCase : Any = tax_model["""target"""]["""decoder"""][layer_name]["""pre_cross_attention_layer_norm"""]["""scale"""]
# MLP
if split_mlp_wi:
_lowerCAmelCase : Any = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
_lowerCAmelCase : List[str] = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
_lowerCAmelCase : Any = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
_lowerCAmelCase : Optional[int] = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
_lowerCAmelCase : Optional[int] = tax_model["""target"""]["""decoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
_lowerCAmelCase : str = flax_model.params["""decoder"""]["""block"""][str(_lowerCamelCase )]["""layer"""]
_lowerCAmelCase : int = tax_attention_key
_lowerCAmelCase : List[str] = tax_attention_out
_lowerCAmelCase : Optional[Any] = tax_attention_query
_lowerCAmelCase : Dict = tax_attention_value
_lowerCAmelCase : str = tax_pre_attention_layer_norm
_lowerCAmelCase : List[Any] = tax_enc_dec_attention_key
_lowerCAmelCase : List[Any] = tax_enc_dec_attention_out
_lowerCAmelCase : Tuple = tax_enc_dec_attention_query
_lowerCAmelCase : Any = tax_enc_dec_attention_value
_lowerCAmelCase : Dict = tax_cross_layer_norm
if split_mlp_wi:
_lowerCAmelCase : Dict = tax_mlp_wi_a
_lowerCAmelCase : int = tax_mlp_wi_a
else:
_lowerCAmelCase : Optional[int] = tax_mlp_wi
_lowerCAmelCase : Dict = tax_mlp_wo
_lowerCAmelCase : List[Any] = txa_mlp_layer_norm
_lowerCAmelCase : Optional[Any] = flax_model_decoder_layer_block
# Decoder Normalization
_lowerCAmelCase : Any = tax_model["""target"""]["""decoder"""]["""decoder_norm"""]["""scale"""]
_lowerCAmelCase : List[str] = txa_decoder_norm
# Only for layer 0:
_lowerCAmelCase : Optional[int] = tax_model["""target"""]["""decoder"""]["""relpos_bias"""]["""rel_embedding"""].T
_lowerCAmelCase : Union[str, Any] = tax_decoder_rel_embedding
# Token Embeddings
_lowerCAmelCase : Optional[int] = tax_model["""target"""]["""token_embedder"""]["""embedding"""]
_lowerCAmelCase : Optional[int] = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_lowerCAmelCase : Tuple = tax_model["""target"""]["""decoder"""]["""logits_dense"""]["""kernel"""]
flax_model.save_pretrained(_lowerCamelCase )
print("""T5X Model was sucessfully converted!""" )
if __name__ == "__main__":
_a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
_a : List[str] = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
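# Example invocation (checkpoint path and output folder are hypothetical):
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /tmp/t5x_checkpoints/checkpoint_1000000 \
#       --config_name google/long-t5-tglobal-base \
#       --flax_dump_folder_path /tmp/longt5_flax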
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph over `vertices_number` vertices where each possible
    edge is kept with the given `probability`."""
    graph = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is lower than `probability`
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, also add the symmetric edge from j to i
                    graph[j].append(i)

    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete graph with `vertices_number` vertices."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }


if __name__ == "__main__":
    import doctest

    doctest.testmod()
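# A quick usage sketch (random_graph output depends on the seed):
#   random.seed(1)
#   random_graph(4, 0.5)   # each edge (i, j), i < j, is kept with probability 0.5
#   complete_graph(3)      # -> {0: [1, 2], 1: [0, 2], 2: [0, 1]}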
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]

        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input = self.get_dummy_seed_input()

        with torch.no_grad():
            output = model(**input)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))


@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
"""simple docstring"""
from __future__ import annotations
def __lowerCAmelCase ( lowercase : list[int] , lowercase : int , lowercase : int , lowercase : int ) -> None:
"""simple docstring"""
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
snake_case ,snake_case : List[str] = array[indexa], array[indexa]
def __lowerCAmelCase ( lowercase : list[int] , lowercase : int , lowercase : int , lowercase : int ) -> None:
"""simple docstring"""
if length > 1:
snake_case : Optional[int] = int(length / 2 )
for i in range(lowercase , low + middle ):
comp_and_swap(lowercase , lowercase , i + middle , lowercase )
bitonic_merge(lowercase , lowercase , lowercase , lowercase )
bitonic_merge(lowercase , low + middle , lowercase , lowercase )
def __lowerCAmelCase ( lowercase : list[int] , lowercase : int , lowercase : int , lowercase : int ) -> None:
"""simple docstring"""
if length > 1:
snake_case : Optional[Any] = int(length / 2 )
bitonic_sort(lowercase , lowercase , lowercase , 1 )
bitonic_sort(lowercase , low + middle , lowercase , 0 )
bitonic_merge(lowercase , lowercase , lowercase , lowercase )
if __name__ == "__main__":
__snake_case = input("""Enter numbers separated by a comma:\n""").strip()
__snake_case = [int(item.strip()) for item in user_input.split(""",""")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("""\nSorted array in ascending order is: """, end="""""")
print(*unsorted, sep=""", """)
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("""Sorted array in descending order is: """, end="""""")
print(*unsorted, sep=""", """)
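# Note: bitonic sort only works when the number of elements is a power of two,
# so the interactive input above must have length 1, 2, 4, 8, ... A minimal sketch:
#   data = [12, 42, -21, 1]
#   bitonic_sort(data, 0, len(data), 1)   # data becomes [-21, 1, 12, 42]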
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
__snake_case = logging.getLogger(__name__)
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser(
description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
)
parser.add_argument(
"""--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
)
parser.add_argument(
"""--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
)
parser.add_argument("""--vocab_size""", default=30522, type=int)
__snake_case = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, """rb""") as fp:
__snake_case = pickle.load(fp)
logger.info("""Counting occurrences for MLM.""")
__snake_case = Counter()
for tk_ids in data:
counter.update(tk_ids)
__snake_case = [0] * args.vocab_size
for k, v in counter.items():
__snake_case = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, """wb""") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
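# The resulting `counts` list (token id -> frequency) is meant to be consumed by a
# distillation training script, which uses the frequencies to smooth the MLM masking
# probabilities (cf. XLM/word2vec). Example with the script's own defaults:
#   python token_counts.py --data_file data/dump.bert-base-uncased.pickle \
#       --token_counts_dump data/token_counts.bert-base-uncased.pickle --vocab_size 30522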
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]",
            "test", "question", "this", "is", "the",
            "first", "second", "third", "fourth", "fifth", "record",
            "want", "##want", "##ed", "wa", "un", "runn", "##ing",
            ",", "low", "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever

    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids

        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )

    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids

        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)

    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
"""simple docstring"""
from collections.abc import Sequence
def _A ( UpperCamelCase_ : Sequence[float], UpperCamelCase_ : float) -> float:
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(UpperCamelCase_))
def _A ( UpperCamelCase_ : Sequence[float], UpperCamelCase_ : float) -> float:
'''simple docstring'''
__lowercase = 0.0
for coeff in reversed(UpperCamelCase_):
__lowercase = result * x + coeff
return result
if __name__ == "__main__":
_a = (0.0, 0.0, 5.0, 9.3, 7.0)
_a = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
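# Both functions compute the same value; Horner's rule just refactors
#   a0 + a1*x + a2*x**2 + ... + an*x**n
# into
#   a0 + x*(a1 + x*(a2 + ... + x*an))
# so only n multiplications and n additions are needed, with no exponentiation.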
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """CLIPTokenizer that can expand one placeholder token into several trained sub-tokens."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Maps each placeholder token to the list of sub-tokens it expands to.
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # Handle cases where a new placeholder token contains an existing placeholder token but is larger.
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent."
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            return [
                self.replace_placeholder_tokens_in_text(item, vector_shuffle=vector_shuffle) for item in text
            ]

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
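# Minimal usage sketch (illustrative only; the checkpoint name below is an assumption,
# any CLIP tokenizer checkpoint works):
#
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#   ids = tokenizer("a photo of <cat-toy>", vector_shuffle=True)
#
# "<cat-toy>" is expanded to "<cat-toy>_0 ... <cat-toy>_3" before the base tokenizer runs.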
| 367 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        # acquiring an already-held lock must block for at least `timeout` seconds
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    # lock file names longer than the usual 255-byte OS limit must be truncated
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
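# Typical FileLock usage that these tests exercise (sketch; the path is hypothetical):
#
#   lock = FileLock("/tmp/my_job.lock")
#   with lock.acquire(timeout=5):  # raises Timeout if another process holds the lock
#       ...  # critical section, runs in at most one process at a time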
| 106 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
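# Minimal inference sketch mirroring the integration test above (the checkpoint name is
# taken from the test; `ConvNextV2ForImageClassification` is the upstream transformers
# class that this file refers to as `ConvNextVaForImageClassification`):
#
#   from transformers import AutoImageProcessor, ConvNextV2ForImageClassification
#   processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
#   model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
#   logits = model(**processor(images=prepare_img(), return_tensors="pt")).logits
#   print(logits.argmax(-1).item())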
| 211 |
| 244 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        use_labels=True,
        is_training=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
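# Note: post_process_semantic_segmentation upsamples the raw (1, 21, 32, 32) logits to the
# requested target_sizes (or keeps the logits' spatial size when none are given) and returns
# one (height, width) label map per image, which is exactly what the assertions above check.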
| 367 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
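# Usage sketch (standard transformers pattern; the values shown are the defaults above):
#
#   from transformers import LukeConfig, LukeModel
#   config = LukeConfig(entity_emb_size=256, use_entity_aware_attention=True)
#   model = LukeModel(config)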
| 52 | 0 |
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class TrainingArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10000, metadata={"help": "Size of buffer used to shuffle streaming dataset."}
    )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate for training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate schedule."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."}
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."}
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."}
    )
    max_train_steps: Optional[int] = field(default=50000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."}
    )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})


@dataclass
class EvaluationArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})


@dataclass
class HumanEvalArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(
        default=None,
        metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},
    )
    do_sample: Optional[bool] = field(
        default=True, metadata={"help": "Sample from the language model's output distribution."}
    )
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."})
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    n_samples: Optional[int] = field(
        default=200, metadata={"help": "Number of completions to generate for each sample."}
    )
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
    output_file: Optional[str] = field(
        default="eval_results.json", metadata={"help": "File where the evaluation results are saved."}
    )
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"}
    )
    device_int: Optional[int] = field(
        default=-1,
        metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        },
    )


@dataclass
class PreprocessingArguments:
    num_workers: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        },
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."}
    )
    output_dir: Optional[str] = field(
        default="codeparrot-clean", metadata={"help": "Folder to save the processed dataset."}
    )
    samples_per_file: Optional[int] = field(
        default=100000, metadata={"help": "Number of files to save per JSON output file."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    line_max: Optional[float] = field(
        default=1000, metadata={"help": "Maximum line length in file, otherwise file is filtered."}
    )
    line_mean: Optional[float] = field(
        default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."}
    )
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."}
    )
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."}
    )
    filter_proba: Optional[float] = field(
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."}
    )
    tokenizer: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={"help": "If True, near-duplicate samples are removed."}
    )
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."}
    )


@dataclass
class TokenizerTrainingArguments:
    base_tokenizer: Optional[str] = field(
        default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."}
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    n_examples: Optional[int] = field(default=200000, metadata={"help": "Number of examples to train tokenizer on."})
    vocab_size: Optional[int] = field(default=32768, metadata={"help": "Vocabulary size of the new tokenizer."})
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})


@dataclass
class PretokenizationArguments:
    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."}
    )
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for pretokenization."})


@dataclass
class InitializationArguments:
    config_name: Optional[str] = field(
        default="gpt2-large", metadata={"help": "Configuration to use for model initialization."}
    )
    tokenizer_name: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."}
    )
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved model to the hub."})
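# These dataclasses are meant to be consumed with HfArgumentParser, e.g. (sketch):
#
#   from transformers import HfArgumentParser
#   parser = HfArgumentParser(TrainingArguments)
#   (args,) = parser.parse_args_into_dataclasses()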
| 228 |
from math import pi, sqrt
def gamma(num: float) -> float:
    """Compute Gamma(num) for positive integers and half-integers."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
| 228 | 1 |
def heaps(arr: list) -> list:
    """Return all permutations of `arr`, generated with Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
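# Heap's algorithm produces each of the n! permutations from the previous one with a
# single swap, so e.g. heaps([0, 1, 2]) yields all 6 permutations of (0, 1, 2).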
| 354 |
def create_ngram(sentence: str, ngram_size: int) -> list:
    """Return all character n-grams of length `ngram_size` from `sentence`."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
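# Example: create_ngram("hello", 2) -> ["he", "el", "ll", "lo"]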
if __name__ == "__main__":
from doctest import testmod
testmod()
| 308 | 0 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        # `sql` and `con` are consumed by the constructor and must not reach pandas' to_sql
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_written in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows_written

        return written
| 187 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def lowerCamelCase__ ( _A , _A , _A , _A , _A=True , _A="pt" ):
'''simple docstring'''
snake_case_ = {"add_prefix_space": True} if isinstance(_A , _A ) and not line.startswith(" " ) else {}
snake_case_ = padding_side
return tokenizer(
[line] , max_length=_A , padding="max_length" if pad_to_max_length else None , truncation=_A , return_tensors=_A , add_special_tokens=_A , **_A , )
def lowerCamelCase__ ( _A , _A , _A=None , ):
'''simple docstring'''
snake_case_ = input_ids.ne(_A ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
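# trim_batch drops padding columns that are empty across the whole batch: a (2, 6)
# batch whose last two columns are all pad_token_id comes back as a (2, 4) tensor.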
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    return list(map(f, x))


def pickle_save(obj, path):
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def lowerCamelCase__ ( _A , _A ):
'''simple docstring'''
return normalize_answer(_A ) == normalize_answer(_A )
def lowerCamelCase__ ( _A , _A ):
'''simple docstring'''
assert len(_A ) == len(_A )
snake_case_ = 0
for hypo, pred in zip(_A , _A ):
em += exact_match_score(_A , _A )
if len(_A ) > 0:
em /= len(_A )
return {"em": em}
def lowerCamelCase__ ( _A ):
'''simple docstring'''
return model_prefix.startswith("rag" )
def lowerCamelCase__ ( _A , _A , _A ):
'''simple docstring'''
snake_case_ = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
snake_case_ = "dropout_rate"
for p in extra_params:
if getattr(_A , _A , _A ):
if not hasattr(_A , _A ) and not hasattr(_A , equivalent_param[p] ):
logger.info("config doesn't have a `{}` attribute".format(_A ) )
delattr(_A , _A )
continue
snake_case_ = p if hasattr(_A , _A ) else equivalent_param[p]
setattr(_A , _A , getattr(_A , _A ) )
delattr(_A , _A )
return hparams, config
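# Hand-checked examples for the metric helpers above (editorial, not in the original file):
#   normalize_answer("The Cat sat!")         -> "cat sat"
#   f1_score("new york city", "new york")    -> 0.8  (precision 2/3, recall 1)
#   calculate_exact_match(["a b"], ["a b"])  -> {"em": 1.0}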
| 187 | 1 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
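# A hypothetical excerpt of the class-info JSON parsed above (field names match the code):
#   {
#       "0": {"name": "wall", "isthing": 0},
#       "7": {"name": "bed",  "isthing": 1}
#   }
# prepare_metadata would map "0"/"7" to their names and collect
# metadata["thing_ids"] == [7] and metadata["class_names"] == ["wall", "bed"].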
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
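    # Worked example of the shortest-edge rule above (editorial): for a 400x800 (w x h)
    # image with size={"shortest_edge": 32}, w < h gives expected_width == 32 and
    # expected_height == int(32 * 800 / 400) == 64.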
    def get_fake_oneformer_outputs(self):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)), masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)), )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class
    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)
@property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))
def snake_case__ ( self):
'''simple docstring'''
pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def comm_get_image_processor_inputs(
        self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"
    ):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )

        return inputs
def snake_case__ ( self):
'''simple docstring'''
pass
    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )

            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
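    # For reference, a minimal NumPy sketch consistent with the assertions above
    # (editorial; the real binary_mask_to_rle lives in image_processing_oneformer):
    #
    #     def binary_mask_to_rle_sketch(mask):
    #         pixels = np.concatenate([[0], np.asarray(mask).flatten(), [0]])
    #         runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    #         runs[1::2] -= runs[::2]   # [start_1, length_1, start_2, length_2, ...]
    #         return list(runs)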
    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)

        self.assertEqual(segmentation[0].shape, target_sizes[0])
    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
| 300 |
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consists of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indices(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
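# Independent NumPy cross-check of the identity used in `sherman_morrison` above
# (editorial sketch; `self` there plays the role of A^(-1)):
#
#     import numpy as np
#
#     def sherman_morrison_check(a_inv: np.ndarray, u: np.ndarray, v: np.ndarray) -> bool:
#         # (A + u v^T)^(-1) == A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
#         denom = 1.0 + float(v.T @ a_inv @ u)
#         updated = a_inv - (a_inv @ u) @ (v.T @ a_inv) / denom
#         return np.allclose(updated, np.linalg.inv(np.linalg.inv(a_inv) + u @ v.T))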
# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
| 300 | 1 |
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"

        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f'{key} -> {key.replace("expert/", f"experts/expert_{idx}/")}')
            s_dict.pop(key)

    return s_dict
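# Worked example of the layer renaming above (illustrative key, not from a real checkpoint):
#   "encoder/layers_3/mlp/wi/kernel"
#   -> re.sub(r"layers_(\d+)", r"block/\1/layer", ...)  # "encoder/block/3/layer/mlp/wi/kernel"
#   -> re.sub(r"/mlp/", r"/1/mlp/", ...)                # "encoder/block/3/layer/1/mlp/wi/kernel"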
GIN_TO_CONFIG_MAPPING = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a google-style gin config to the Hugging Face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
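# Hypothetical gin-file lines that the regexes above would match:
#   NUM_HEADS = 12
#   MLP_DIM = 3072
#   dense.MlpBlock.activations = ('gelu',)
# yielding config kwargs {"num_heads": 12, "d_ff": 3072, "feed_forward_proj": "gelu"}.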
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_tax_checkpoint(flax_checkpoint_path)
    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)
    pt_model = SwitchTransformersForConditionalGeneration(config)
    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
        help='''Path to the T5X checkpoint to convert.''',
    )
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 315 |
"""simple docstring"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 315 | 1 |
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
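# Quick sanity check (rows are L2-normalized, so this is pairwise cosine similarity):
#   jax_cosine_distance(jnp.eye(2), jnp.array([[1.0, 0.0]]))  # -> [[1.], [0.]]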
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))
    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule
    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params
    def __call__(
        self,
        clip_input,
        params: dict = None,
    ):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 254 |
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
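# Illustrative call of the helper above (repo and file name are hypothetical):
#   hf_hub_url("user/my-dataset", "data/train-00000-of-00001.parquet", revision="main")
# resolves to a huggingface.co ".../datasets/user/my-dataset/resolve/main/..." URL.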
| 254 | 1 |
def jaro_winkler(str1: str, str2: str) -> float:
    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
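# Hand-checked values for the implementation above (standard Jaro-Winkler examples):
#   jaro_winkler("hello", "world")   -> 0.4666666666666666
#   jaro_winkler("martha", "marhta") -> 0.9611111111111111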
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
| 76 |
'''simple docstring'''
from __future__ import annotations
__author__ = 'Muhammad Umer Farooq'
__license__ = 'MIT'
__version__ = '1.0.0'
__maintainer__ = 'Muhammad Umer Farooq'
__email__ = '[email protected]'
__status__ = 'Alpha'
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    # Get the base domain from the url
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(url)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
lowerCamelCase__ = emails_from_url('https://github.com')
print(F'''{len(emails)} emails found:''')
print('\n'.join(sorted(emails)))
| 234 | 0 |
"""simple docstring"""
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
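# Trace of the sort above on [3, 1, 2] (editorial): pass 0 compares even pairs
# (indices 0/1) -> [1, 3, 2]; pass 1 compares odd pairs (indices 1/2) -> [1, 2, 3].
# n passes of alternating comparisons give O(n^2) work, but each pass is parallelizable.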
| 367 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __lowerCAmelCase ( lowercase : List[str] , lowercase : int , lowercase : Dict , lowercase : Dict , lowercase : int ) -> int:
"""simple docstring"""
with open(lowercase ) as metadata_file:
snake_case : str = json.load(lowercase )
snake_case : Optional[Any] = LukeConfig(use_entity_aware_attention=lowercase , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
snake_case : Tuple = torch.load(lowercase , map_location="cpu" )["module"]
# Load the entity vocab file
snake_case : Optional[Any] = load_original_entity_vocab(lowercase )
# add an entry for [MASK2]
snake_case : Dict = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
snake_case : Union[str, Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
snake_case : Tuple = AddedToken("<ent>" , lstrip=lowercase , rstrip=lowercase )
snake_case : str = AddedToken("<ent2>" , lstrip=lowercase , rstrip=lowercase )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(lowercase )
with open(os.path.join(lowercase , "tokenizer_config.json" ) , "r" ) as f:
snake_case : str = json.load(lowercase )
snake_case : List[str] = "MLukeTokenizer"
with open(os.path.join(lowercase , "tokenizer_config.json" ) , "w" ) as f:
json.dump(lowercase , lowercase )
with open(os.path.join(lowercase , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(lowercase , lowercase )
snake_case : Dict = MLukeTokenizer.from_pretrained(lowercase )
# Initialize the embeddings of the special tokens
snake_case : Tuple = tokenizer.convert_tokens_to_ids(["@"] )[0]
snake_case : str = tokenizer.convert_tokens_to_ids(["#"] )[0]
snake_case : Union[str, Any] = state_dict["embeddings.word_embeddings.weight"]
snake_case : str = word_emb[ent_init_index].unsqueeze(0 )
snake_case : Union[str, Any] = word_emb[enta_init_index].unsqueeze(0 )
snake_case : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
snake_case : Tuple = state_dict[bias_name]
snake_case : Optional[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
snake_case : Optional[Any] = decoder_bias[enta_init_index].unsqueeze(0 )
snake_case : Any = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
snake_case : Optional[int] = F'encoder.layer.{layer_index}.attention.self.'
snake_case : Optional[Any] = state_dict[prefix + matrix_name]
snake_case : Optional[Any] = state_dict[prefix + matrix_name]
snake_case : Any = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
snake_case : List[Any] = state_dict["entity_embeddings.entity_embeddings.weight"]
snake_case : str = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
snake_case : Tuple = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
snake_case : Optional[int] = state_dict["entity_predictions.bias"]
snake_case : Optional[int] = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
snake_case : Union[str, Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
snake_case : Union[str, Any] = LukeForMaskedLM(config=lowercase ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
snake_case : Tuple = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
snake_case : Any = state_dict[key]
else:
snake_case : Tuple = state_dict[key]
snake_case ,snake_case : Optional[Any] = model.load_state_dict(lowercase , strict=lowercase )
if set(lowercase ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'Unexpected unexpected_keys: {unexpected_keys}' )
if set(lowercase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'Unexpected missing_keys: {missing_keys}' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
snake_case : Optional[Any] = MLukeTokenizer.from_pretrained(lowercase , task="entity_classification" )
snake_case : List[str] = "ISO 639-3 uses the code fas for the dialects spoken across Iran and ใขใใฌใในใฟใณ (Afghanistan)."
snake_case : str = (0, 9)
snake_case : Union[str, Any] = tokenizer(lowercase , entity_spans=[span] , return_tensors="pt" )
snake_case : int = model(**lowercase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
snake_case : int = torch.Size((1, 33, 768) )
snake_case : str = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
snake_case : Any = torch.Size((1, 1, 768) )
snake_case : List[Any] = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
snake_case : List[str] = MLukeTokenizer.from_pretrained(lowercase )
snake_case : List[Any] = "Tokyo is the capital of <mask>."
snake_case : Optional[Any] = (24, 30)
snake_case : List[str] = tokenizer(lowercase , entity_spans=[span] , return_tensors="pt" )
snake_case : Any = model(**lowercase )
snake_case : int = encoding["input_ids"][0].tolist()
snake_case : str = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
snake_case : Tuple = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase )
snake_case : Tuple = outputs.entity_logits[0][0].argmax().item()
snake_case : Optional[int] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase ) )
model.save_pretrained(lowercase )
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
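# Hypothetical lines from the entity vocab JSONL consumed above:
#   {"id": 0, "entities": [["[MASK]", "en"]]}
#   {"id": 2, "entities": [["Japan", "en"], ["Japon", "fr"]]}
# which would produce {"[MASK]": 0, "en:Japan": 2, "fr:Japon": 2}.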
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
__snake_case = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 112 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that goes through."

    def __init__(self):
        self.events = []
    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
| 37 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]
@require_tf
@slow
def UpperCAmelCase_ ( self ) -> int:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model ,"""tf""" ,12 ,**model_kwargs )
@require_torch
@slow
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model ,"""pt""" ,12 ,**model_kwargs )
@require_torch
@slow
def UpperCAmelCase_ ( self ) -> Any:
from transformers import BertModel
        vocab = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
        with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
            vocab_file.write("""\n""".join(vocab ) )
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name )
        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab ) ) )
            model.save_pretrained(bert_save_dir )
            self._test_export(bert_save_dir ,"""pt""" ,12 ,tokenizer )
@require_tf
@slow
def UpperCAmelCase_ ( self ) -> List[str]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model ,"""tf""" ,12 ,**model_kwargs )
            quantized_path = quantize(Path(path ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail("""Quantized model is bigger than initial ONNX model""" )
@require_torch
@slow
def UpperCAmelCase_ ( self ) -> List[Any]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model ,"""pt""" ,12 ,**model_kwargs )
            quantized_path = quantize(path )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail("""Quantized model is bigger than initial ONNX model""" )
    def _test_export( self ,model ,framework ,opset ,tokenizer=None ,**model_kwargs ) -> Optional[Any]:
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir ).joinpath("""model.onnx""" )
            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()
            # Export
            convert(framework ,model ,path ,opset ,tokenizer ,**model_kwargs )
            return path
        except Exception as e:
            self.fail(e )
@require_torch
@require_tokenizers
@slow
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
from transformers import BertModel
        model = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
        tokenizer = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
        self._test_infer_dynamic_axis(model ,tokenizer ,"""pt""" )
@require_tf
@require_tokenizers
@slow
def UpperCAmelCase_ ( self ) -> Optional[int]:
from transformers import TFBertModel
        model = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
        tokenizer = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
        self._test_infer_dynamic_axis(model ,tokenizer ,"""tf""" )
    def _test_infer_dynamic_axis( self ,model ,tokenizer ,framework ) -> Tuple:
        nlp = FeatureExtractionPipeline(model ,tokenizer )
        variable_names = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp ,framework )
        # Assert all variables are present
        self.assertEqual(len(shapes ) ,len(variable_names ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] ,input_vars )
        self.assertSequenceEqual(variable_names[3:] ,output_vars )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] ,{0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] ,{0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] ,{0: """batch"""} )
def UpperCAmelCase_ ( self ) -> Optional[int]:
        valid_input_names = ["""input_ids""", """attention_mask""", """token_type_ids"""]
        tokens = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs() ,tokens ,valid_input_names )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args ) ,3 )
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names ) ,set(valid_input_names ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args ,(tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs() ,tokens ,valid_input_names )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(ordered_input_names ) ,1 )
        self.assertEqual(len(inputs_args ) ,1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] ,tokens["""input_ids"""] )
self.assertEqual(ordered_input_names[0] ,"""input_ids""" )
def UpperCAmelCase_ ( self ) -> Tuple:
        generated = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) ,"""-test""" )
self.assertEqual("""/home/something/my_fake_model-test.onnx""" ,generated.as_posix() )
| 37 | 1 |
def equation(x: float) -> float:
    """simple docstring"""
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    """simple docstring"""
    # Bolzano theorem: a sign change over [a, b] guarantees a root in between
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
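    # Added sanity check (a sketch, not part of the original demo): the loop
    # stops once the bracket [a, b] is narrower than 0.01, so the returned
    # midpoint sits within that tolerance of the true root sqrt(10) on [0, 6].
    import math

    assert abs(bisection(0, 6) - math.sqrt(10)) < 0.01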
| 49 |
from string import ascii_uppercase

ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    """simple docstring"""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")

    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    for base in range(2, 37):
        for num in range(1000):
            assert int(decimal_to_any(num, base), base) == num, (
                num,
                base,
                decimal_to_any(num, base),
                int(decimal_to_any(num, base), base),
            )
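    # A couple of spot checks (added sketch): digits above 9 map to letters,
    # so 255 in base 16 is "FF" and 10 in base 2 is "1010".
    assert decimal_to_any(255, 16) == "FF"
    assert decimal_to_any(10, 2) == "1010"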
| 49 | 1 |
from __future__ import annotations


def min_path_sum(matrix: list[list[int]]) -> int:
    """simple docstring"""
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
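    # Worked example (added sketch): the function folds costs in place, so the
    # cheapest right/down path through this grid is 1 + 3 + 1 + 1 + 1 = 7.
    assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7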
| 313 |
'''simple docstring'''


class PrefixSum:
    '''simple docstring'''

    def __init__(self, array: list[int]):
        '''simple docstring'''
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        # each entry holds the sum of the array up to and including that index
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        '''simple docstring'''
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum_of_sub_array(self, target_sum: int) -> bool:
        '''simple docstring'''
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
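    # Usage sketch (added): range sums become O(1) lookups after the O(n)
    # prefix pass, and a subarray summing to a target exists iff two prefix
    # sums differ by exactly that target.
    ps = PrefixSum([1, 2, 3, 4])
    assert ps.get_sum(0, 3) == 10  # 1 + 2 + 3 + 4
    assert ps.get_sum(1, 2) == 5  # 2 + 3
    assert ps.contains_sum_of_sub_array(7)  # 3 + 4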
| 79 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCamelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
a_ : Optional[Any] = DiTPipeline
a_ : Optional[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
a_ : Dict = PipelineTesterMixin.required_optional_params - {
"""latents""",
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
a_ : int = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
a_ : List[Any] = False
def lowerCamelCase ( self : Union[str, Any] ):
torch.manual_seed(0 )
lowerCAmelCase_ : Any = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=a_ , activation_fn="gelu-approximate" , num_embeds_ada_norm=10_00 , norm_type="ada_norm_zero" , norm_elementwise_affine=a_ , )
lowerCAmelCase_ : Optional[Any] = AutoencoderKL()
lowerCAmelCase_ : Optional[int] = DDIMScheduler()
lowerCAmelCase_ : Optional[int] = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def lowerCamelCase ( self : List[str] , a_ : List[Any] , a_ : str=0 ):
if str(a_ ).startswith("mps" ):
lowerCAmelCase_ : Union[str, Any] = torch.manual_seed(a_ )
else:
lowerCAmelCase_ : Any = torch.Generator(device=a_ ).manual_seed(a_ )
lowerCAmelCase_ : int = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def lowerCamelCase ( self : Optional[int] ):
lowerCAmelCase_ : str = "cpu"
lowerCAmelCase_ : Optional[int] = self.get_dummy_components()
lowerCAmelCase_ : Any = self.pipeline_class(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : Optional[int] = self.get_dummy_inputs(a_ )
lowerCAmelCase_ : Union[str, Any] = pipe(**a_ ).images
lowerCAmelCase_ : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
lowerCAmelCase_ : Any = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
lowerCAmelCase_ : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a_ , 1e-3 )
def lowerCamelCase ( self : int ):
self._test_inference_batch_single_identical(relax_max_difference=a_ , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowerCamelCase ( self : List[Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase ( self : Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self : Optional[int] ):
lowerCAmelCase_ : Tuple = torch.manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
lowerCAmelCase_ : int = ["vase", "umbrella", "white shark", "white wolf"]
lowerCAmelCase_ : Union[str, Any] = pipe.get_label_ids(a_ )
lowerCAmelCase_ : int = pipe(a_ , generator=a_ , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(a_ , a_ ):
lowerCAmelCase_ : Union[str, Any] = load_numpy(
f'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-2
def lowerCamelCase ( self : Dict ):
lowerCAmelCase_ : List[Any] = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
lowerCAmelCase_ : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
lowerCAmelCase_ : List[Any] = ["vase", "umbrella"]
lowerCAmelCase_ : Optional[Any] = pipe.get_label_ids(a_ )
lowerCAmelCase_ : List[str] = torch.manual_seed(0 )
lowerCAmelCase_ : Tuple = pipe(a_ , generator=a_ , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(a_ , a_ ):
lowerCAmelCase_ : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
f'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-1
| 161 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
lowercase__ = {
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
lowercase__ = {"""facebook/blenderbot-3B""": 128}
class __lowerCamelCase ( A__ ):
'''simple docstring'''
a_ : Dict = VOCAB_FILES_NAMES
a_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
a_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Optional[int] = ["""input_ids""", """attention_mask"""]
a_ : int = BlenderbotTokenizer
def __init__( self : Optional[Any] , a_ : Union[str, Any]=None , a_ : Any=None , a_ : int=None , a_ : str="replace" , a_ : Tuple="<s>" , a_ : Optional[int]="</s>" , a_ : Union[str, Any]="</s>" , a_ : Union[str, Any]="<s>" , a_ : Optional[Any]="<unk>" , a_ : str="<pad>" , a_ : List[Any]="<mask>" , a_ : Tuple=False , a_ : Dict=True , **a_ : str , ):
super().__init__(
a_ , a_ , tokenizer_file=a_ , errors=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , cls_token=a_ , unk_token=a_ , pad_token=a_ , mask_token=a_ , add_prefix_space=a_ , trim_offsets=a_ , **a_ , )
lowerCAmelCase_ : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , a_ ) != add_prefix_space:
lowerCAmelCase_ : str = getattr(a_ , pre_tok_state.pop("type" ) )
lowerCAmelCase_ : int = add_prefix_space
lowerCAmelCase_ : List[Any] = pre_tok_class(**a_ )
lowerCAmelCase_ : Any = add_prefix_space
lowerCAmelCase_ : str = "post_processor"
lowerCAmelCase_ : str = getattr(self.backend_tokenizer , a_ , a_ )
if tokenizer_component_instance:
lowerCAmelCase_ : str = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowerCAmelCase_ : Dict = tuple(state["sep"] )
if "cls" in state:
lowerCAmelCase_ : Optional[int] = tuple(state["cls"] )
lowerCAmelCase_ : Optional[int] = False
if state.get("add_prefix_space" , a_ ) != add_prefix_space:
lowerCAmelCase_ : List[str] = add_prefix_space
lowerCAmelCase_ : Any = True
if state.get("trim_offsets" , a_ ) != trim_offsets:
lowerCAmelCase_ : int = trim_offsets
lowerCAmelCase_ : List[str] = True
if changes_to_apply:
lowerCAmelCase_ : Optional[Any] = getattr(a_ , state.pop("type" ) )
lowerCAmelCase_ : Tuple = component_class(**a_ )
setattr(self.backend_tokenizer , a_ , a_ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCamelCase ( self : int ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase ( self : int , a_ : List[Any] ):
lowerCAmelCase_ : Optional[Any] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else value
lowerCAmelCase_ : Tuple = value
def lowerCamelCase ( self : int , *a_ : List[str] , **a_ : Optional[int] ):
lowerCAmelCase_ : Tuple = kwargs.get("is_split_into_words" , a_ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*a_ , **a_ )
def lowerCamelCase ( self : str , *a_ : Union[str, Any] , **a_ : List[str] ):
lowerCAmelCase_ : Tuple = kwargs.get("is_split_into_words" , a_ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*a_ , **a_ )
def lowerCamelCase ( self : int , a_ : str , a_ : Optional[str] = None ):
lowerCAmelCase_ : str = self._tokenizer.model.save(a_ , name=a_ )
return tuple(a_ )
def lowerCamelCase ( self : int , a_ : List[int] , a_ : Optional[List[int]] = None ):
lowerCAmelCase_ : Optional[int] = [self.sep_token_id]
lowerCAmelCase_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase ( self : List[Any] , a_ : List[int] , a_ : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def lowerCamelCase ( self : Union[str, Any] , a_ : "Conversation" ):
lowerCAmelCase_ : List[str] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(a_ )
lowerCAmelCase_ : Tuple = " ".join(a_ )
lowerCAmelCase_ : Any = self.encode(a_ )
if len(a_ ) > self.model_max_length:
lowerCAmelCase_ : Optional[Any] = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 161 | 1 |
from __future__ import annotations

RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]


# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
    "A": "N",
    "N": "A",
    "B": "O",
    "O": "B",
    "C": "P",
    "P": "C",
    "D": "Q",
    "Q": "D",
    "E": "R",
    "R": "E",
    "F": "S",
    "S": "F",
    "G": "T",
    "T": "G",
    "H": "U",
    "U": "H",
    "I": "V",
    "V": "I",
    "J": "W",
    "W": "J",
    "K": "X",
    "X": "K",
    "L": "Y",
    "Y": "L",
    "M": "Z",
    "Z": "M",
}

# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"


def _validator(rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str):
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict


def _plugboard(pbstring: str):
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb


def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor1_sel, rotor2_sel, rotor3_sel = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor1_sel[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor2_sel[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor3_sel[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor3_sel.index(symbol) - rotorpos3]
            symbol = abc[rotor2_sel.index(symbol) - rotorpos2]
            symbol = abc[rotor1_sel.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
            rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
            rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #    pass
        #    Error could be also raised
        #    raise ValueError(
        #        'Invalid symbol('+repr(symbol)+')')

        result.append(symbol)

    return "".join(result)


if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 231 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( __a , unittest.TestCase ):
_lowercase =CLIPTokenizer
_lowercase =CLIPTokenizerFast
_lowercase =True
_lowercase ={}
_lowercase =False
def __a ( self ) -> Dict:
super().setUp()
# fmt: off
lowerCAmelCase_ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowerCAmelCase_ = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase ) ) ) )
lowerCAmelCase_ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
lowerCAmelCase_ = {"unk_token": "<unk>"}
lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_UpperCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_UpperCamelCase ) )
def __a ( self , **_UpperCamelCase ) -> Any:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def __a ( self , **_UpperCamelCase ) -> int:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def __a ( self , _UpperCamelCase ) -> List[str]:
lowerCAmelCase_ = "lower newer"
lowerCAmelCase_ = "lower newer"
return input_text, output_text
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCAmelCase_ = "lower newer"
lowerCAmelCase_ = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
lowerCAmelCase_ = tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = tokens + [tokenizer.unk_token]
lowerCAmelCase_ = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , _UpperCamelCase )
@require_ftfy
def __a ( self ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
lowerCAmelCase_ = "A\n'll 11p223RFโho!!to?'d'd''d of a cat to-$''d."
lowerCAmelCase_ = tokenizer_s.tokenize(_UpperCamelCase )
lowerCAmelCase_ = tokenizer_r.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
lowerCAmelCase_ = "xa\u0303y" + " " + "x\xe3y"
lowerCAmelCase_ = tokenizer_s.tokenize(_UpperCamelCase )
lowerCAmelCase_ = tokenizer_r.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
# Test that the tokenization is identical on unicode of space type
lowerCAmelCase_ = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
lowerCAmelCase_ = tokenizer_s.tokenize(_UpperCamelCase )
lowerCAmelCase_ = tokenizer_r.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
# Test that the tokenization is identical on unicode of line break type
lowerCAmelCase_ = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "โฆ" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
lowerCAmelCase_ = tokenizer_s.tokenize(_UpperCamelCase )
lowerCAmelCase_ = tokenizer_r.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def __a ( self ) -> str:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase_ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
lowerCAmelCase_ = f"""{text_of_1_token} {text_of_1_token}"""
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , )
lowerCAmelCase_ = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ) + 1, len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
lowerCAmelCase_ = f""" {text}"""
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , )
lowerCAmelCase_ = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCamelCase ) + 1, 1 + len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
def __a ( self ) -> Optional[Any]:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(_UpperCamelCase ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def __a ( self ) -> str:
super().test_tokenization_python_rust_equals()
def __a ( self ) -> Any:
# CLIP always lower cases letters
pass
| 231 | 1 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
UpperCAmelCase = get_logger(__name__)
def lowerCamelCase (a_ :Any , a_ :Optional[int] , a_ :str , a_ :Optional[Any] , a_ :List[str]=0) -> Optional[Any]:
os.makedirs(a_ , exist_ok=a_)
with FSDP.state_dict_type(
a_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config):
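        # inside this context manager, state_dict() yields a full, local or
        # sharded state dict depending on fsdp_plugin.state_dict_type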
lowercase :List[Any] = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
lowercase :str = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin"""
lowercase :Tuple = os.path.join(a_ , a_)
if accelerator.process_index == 0:
logger.info(F"""Saving model to {output_model_file}""")
torch.save(a_ , a_)
logger.info(F"""Model saved to {output_model_file}""")
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
lowercase :Union[str, Any] = (
F"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
lowercase :List[str] = os.path.join(a_ , a_)
logger.info(F"""Saving model to {output_model_file}""")
torch.save(a_ , a_)
logger.info(F"""Model saved to {output_model_file}""")
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
lowercase :int = os.path.join(a_ , F"""{MODEL_NAME}_{model_index}""")
os.makedirs(a_ , exist_ok=a_)
logger.info(F"""Saving model to {ckpt_dir}""")
lowercase :Optional[int] = {'''model''': state_dict}
dist_cp.save_state_dict(
state_dict=a_ , storage_writer=dist_cp.FileSystemWriter(a_) , planner=DefaultSavePlanner() , )
logger.info(F"""Model saved to {ckpt_dir}""")
def lowerCamelCase (a_ :Any , a_ :str , a_ :Union[str, Any] , a_ :int , a_ :int=0) -> Tuple:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
a_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(a_) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'''Set the `sync_module_states` flag to `True` so that model states are synced across processes when '''
'''initializing FSDP object''')
return
lowercase :List[Any] = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin"""
lowercase :List[Any] = os.path.join(a_ , a_)
logger.info(F"""Loading model from {input_model_file}""")
lowercase :str = torch.load(a_)
logger.info(F"""Model loaded from {input_model_file}""")
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
lowercase :int = (
F"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
lowercase :Dict = os.path.join(a_ , a_)
logger.info(F"""Loading model from {input_model_file}""")
lowercase :Dict = torch.load(a_)
logger.info(F"""Model loaded from {input_model_file}""")
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
lowercase :Any = (
os.path.join(a_ , F"""{MODEL_NAME}_{model_index}""")
if F"""{MODEL_NAME}""" not in input_dir
else input_dir
)
logger.info(F"""Loading model from {ckpt_dir}""")
lowercase :Union[str, Any] = {'''model''': model.state_dict()}
dist_cp.load_state_dict(
state_dict=a_ , storage_reader=dist_cp.FileSystemReader(a_) , planner=DefaultLoadPlanner() , )
lowercase :Tuple = state_dict['''model''']
logger.info(F"""Model loaded from {ckpt_dir}""")
model.load_state_dict(a_)
def lowerCamelCase (a_ :Tuple , a_ :int , a_ :Any , a_ :List[str] , a_ :Optional[Any] , a_ :List[str]=0) -> List[Any]:
os.makedirs(a_ , exist_ok=a_)
with FSDP.state_dict_type(
a_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config):
lowercase :int = FSDP.optim_state_dict(a_ , a_)
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
lowercase :Optional[Any] = (
F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
lowercase :Tuple = os.path.join(a_ , a_)
logger.info(F"""Saving Optimizer state to {output_optimizer_file}""")
torch.save(a_ , a_)
logger.info(F"""Optimizer state saved in {output_optimizer_file}""")
else:
lowercase :List[str] = os.path.join(a_ , F"""{OPTIMIZER_NAME}_{optimizer_index}""")
os.makedirs(a_ , exist_ok=a_)
logger.info(F"""Saving Optimizer state to {ckpt_dir}""")
dist_cp.save_state_dict(
state_dict={'''optimizer''': optim_state} , storage_writer=dist_cp.FileSystemWriter(a_) , planner=DefaultSavePlanner() , )
logger.info(F"""Optimizer state saved in {ckpt_dir}""")
def lowerCamelCase (a_ :List[Any] , a_ :Tuple , a_ :Optional[int] , a_ :Any , a_ :List[Any] , a_ :str=0) -> Union[str, Any]:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
a_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
lowercase :Optional[int] = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
lowercase :Any = (
F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
lowercase :List[Any] = os.path.join(a_ , a_)
logger.info(F"""Loading Optimizer state from {input_optimizer_file}""")
lowercase :Union[str, Any] = torch.load(a_)
logger.info(F"""Optimizer state loaded from {input_optimizer_file}""")
else:
lowercase :Optional[int] = (
os.path.join(a_ , F"""{OPTIMIZER_NAME}_{optimizer_index}""")
if F"""{OPTIMIZER_NAME}""" not in input_dir
else input_dir
)
logger.info(F"""Loading Optimizer from {ckpt_dir}""")
lowercase :str = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key='''optimizer''' , storage_reader=dist_cp.FileSystemReader(a_) , )
lowercase :Union[str, Any] = optim_state['''optimizer''']
logger.info(F"""Optimizer loaded from {ckpt_dir}""")
lowercase :str = FSDP.optim_state_dict_to_load(a_ , a_ , a_)
optimizer.load_state_dict(a_)
| 351 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase = logging.get_logger(__name__)
def lowerCamelCase (a_ :str) -> YolosConfig:
lowercase :Union[str, Any] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowercase :List[str] = 192
lowercase :List[str] = 768
lowercase :int = 12
lowercase :str = 3
lowercase :List[Any] = [800, 1333]
lowercase :Any = False
elif yolos_name == "yolos_s_dWr":
lowercase :List[str] = 330
lowercase :List[Any] = 14
lowercase :int = 6
lowercase :List[Any] = 1320
elif "yolos_s" in yolos_name:
lowercase :int = 384
lowercase :Union[str, Any] = 1536
lowercase :int = 12
lowercase :str = 6
elif "yolos_b" in yolos_name:
lowercase :Dict = [800, 1344]
lowercase :List[str] = 91
lowercase :List[Any] = '''huggingface/label-files'''
lowercase :Union[str, Any] = '''coco-detection-id2label.json'''
lowercase :int = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r'''))
lowercase :List[Any] = {int(a_): v for k, v in idalabel.items()}
lowercase :Dict = idalabel
lowercase :Tuple = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase (a_ :dict , a_ :YolosConfig , a_ :bool = False) -> Optional[int]:
for i in range(config.num_hidden_layers):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase :Dict = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""")
lowercase :List[Any] = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""")
# next, add query, keys and values (in that order) to the state dict
lowercase :int = in_proj_weight[: config.hidden_size, :]
lowercase :List[str] = in_proj_bias[: config.hidden_size]
lowercase :Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase :int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase :Any = in_proj_weight[-config.hidden_size :, :]
lowercase :Union[str, Any] = in_proj_bias[-config.hidden_size :]
def lowerCamelCase (a_ :str) -> str:
if "backbone" in name:
lowercase :Optional[int] = name.replace('''backbone''' , '''vit''')
if "cls_token" in name:
lowercase :List[Any] = name.replace('''cls_token''' , '''embeddings.cls_token''')
if "det_token" in name:
lowercase :int = name.replace('''det_token''' , '''embeddings.detection_tokens''')
if "mid_pos_embed" in name:
lowercase :List[Any] = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''')
if "pos_embed" in name:
lowercase :List[str] = name.replace('''pos_embed''' , '''embeddings.position_embeddings''')
if "patch_embed.proj" in name:
lowercase :Any = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''')
if "blocks" in name:
lowercase :Any = name.replace('''blocks''' , '''encoder.layer''')
if "attn.proj" in name:
lowercase :Dict = name.replace('''attn.proj''' , '''attention.output.dense''')
if "attn" in name:
lowercase :Tuple = name.replace('''attn''' , '''attention.self''')
if "norm1" in name:
lowercase :List[Any] = name.replace('''norm1''' , '''layernorm_before''')
if "norm2" in name:
lowercase :List[Any] = name.replace('''norm2''' , '''layernorm_after''')
if "mlp.fc1" in name:
lowercase :Union[str, Any] = name.replace('''mlp.fc1''' , '''intermediate.dense''')
if "mlp.fc2" in name:
lowercase :Dict = name.replace('''mlp.fc2''' , '''output.dense''')
if "class_embed" in name:
lowercase :Dict = name.replace('''class_embed''' , '''class_labels_classifier''')
if "bbox_embed" in name:
lowercase :Dict = name.replace('''bbox_embed''' , '''bbox_predictor''')
if "vit.norm" in name:
lowercase :Dict = name.replace('''vit.norm''' , '''vit.layernorm''')
return name
def lowerCamelCase (a_ :dict , a_ :YolosForObjectDetection) -> dict:
for key in orig_state_dict.copy().keys():
lowercase :List[Any] = orig_state_dict.pop(a_)
if "qkv" in key:
lowercase :str = key.split('''.''')
lowercase :List[str] = int(key_split[2])
lowercase :List[str] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
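            # split the fused qkv matrix row-wise: the first `dim` rows are the
            # query projection, the middle rows the key, and the last the value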
if "weight" in key:
lowercase :List[Any] = val[:dim, :]
lowercase :Optional[int] = val[
dim : dim * 2, :
]
lowercase :Any = val[-dim:, :]
else:
lowercase :List[str] = val[:dim]
lowercase :Union[str, Any] = val[dim : dim * 2]
lowercase :List[Any] = val[-dim:]
else:
lowercase :List[str] = val
return orig_state_dict
def lowerCamelCase () -> torch.Tensor:
lowercase :Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase :Dict = Image.open(requests.get(a_ , stream=a_).raw)
return im
@torch.no_grad()
def lowerCamelCase (a_ :str , a_ :str , a_ :str , a_ :bool = False) -> List[Any]:
lowercase :Union[str, Any] = get_yolos_config(a_)
# load original state_dict
lowercase :List[str] = torch.load(a_ , map_location='''cpu''')['''model''']
# load ๐ค model
lowercase :Tuple = YolosForObjectDetection(a_)
model.eval()
lowercase :Dict = convert_state_dict(a_ , a_)
model.load_state_dict(a_)
# Check outputs on an image, prepared by YolosImageProcessor
lowercase :Tuple = 800 if yolos_name != '''yolos_ti''' else 512
lowercase :Dict = YolosImageProcessor(format='''coco_detection''' , size=a_)
lowercase :Optional[int] = image_processor(images=prepare_img() , return_tensors='''pt''')
lowercase :List[Any] = model(**a_)
lowercase , lowercase :Dict = outputs.logits, outputs.pred_boxes
lowercase , lowercase :int = None, None
if yolos_name == "yolos_ti":
lowercase :Dict = torch.tensor(
[[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]])
lowercase :Dict = torch.tensor(
[[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]])
elif yolos_name == "yolos_s_200_pre":
lowercase :Union[str, Any] = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]])
lowercase :List[str] = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]])
elif yolos_name == "yolos_s_300_pre":
lowercase :int = torch.tensor(
[[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]])
lowercase :Optional[Any] = torch.tensor(
[[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]])
elif yolos_name == "yolos_s_dWr":
lowercase :int = torch.tensor(
[[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]])
lowercase :Dict = torch.tensor(
[[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]])
elif yolos_name == "yolos_base":
lowercase :Dict = torch.tensor(
[[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]])
lowercase :Tuple = torch.tensor(
[[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]])
else:
raise ValueError(F"""Unknown yolos_name: {yolos_name}""")
assert torch.allclose(logits[0, :3, :3] , a_ , atol=1E-4)
assert torch.allclose(pred_boxes[0, :3, :3] , a_ , atol=1E-4)
Path(a_).mkdir(exist_ok=a_)
print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""")
model.save_pretrained(a_)
print(F"""Saving image processor to {pytorch_dump_folder_path}""")
image_processor.save_pretrained(a_)
if push_to_hub:
lowercase :Optional[int] = {
'''yolos_ti''': '''yolos-tiny''',
'''yolos_s_200_pre''': '''yolos-small''',
'''yolos_s_300_pre''': '''yolos-small-300''',
'''yolos_s_dWr''': '''yolos-small-dwr''',
'''yolos_base''': '''yolos-base''',
}
print('''Pushing to the hub...''')
lowercase :Optional[Any] = model_mapping[yolos_name]
image_processor.push_to_hub(a_ , organization='''hustvl''')
model.push_to_hub(a_ , organization='''hustvl''')
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the ๐ค hub.'''
)
UpperCAmelCase = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 172 | 0 |
'''simple docstring'''
from math import ceil


def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)

    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks) )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks) )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks) )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
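

# Usage sketch (added): spread 12 attention blocks over two devices and check
# that the resulting map covers every block exactly once.
if __name__ == "__main__":
    device_map = get_device_map(12, [0, 1])  # {0: [0..5], 1: [6..11]}
    assert_device_map(device_map, 12)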
| 234 |
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = tmp_path / "cache"
_UpperCAmelCase : int = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase : Union[str, Any] = TextDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase ).read()
_check_text_dataset(__lowerCAmelCase , __lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : List[Any] = tmp_path / "cache"
_UpperCAmelCase : Any = {"text": "string"}
_UpperCAmelCase : Optional[Any] = features.copy() if features else default_expected_features
_UpperCAmelCase : Union[str, Any] = (
Features({feature: Value(__lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase : Union[str, Any] = TextDatasetReader(__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read()
_check_text_dataset(__lowerCAmelCase , __lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Any = tmp_path / "cache"
_UpperCAmelCase : Dict = {"text": "string"}
_UpperCAmelCase : Union[str, Any] = TextDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase , split=__lowerCAmelCase ).read()
_check_text_dataset(__lowerCAmelCase , __lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if issubclass(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = text_path
elif issubclass(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : int = [text_path]
_UpperCAmelCase : List[Any] = tmp_path / "cache"
_UpperCAmelCase : Union[str, Any] = {"text": "string"}
_UpperCAmelCase : Optional[int] = TextDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read()
_check_text_dataset(__lowerCAmelCase , __lowerCAmelCase )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=("train",) ):
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
for split in splits:
_UpperCAmelCase : List[str] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : List[Any] = tmp_path / "cache"
_UpperCAmelCase : Tuple = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase : Any = TextDatasetReader({"train": text_path} , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase ).read()
_check_text_datasetdict(__lowerCAmelCase , __lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : List[Any] = tmp_path / "cache"
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
_UpperCAmelCase : List[Any] = {"text": "string"}
_UpperCAmelCase : List[str] = features.copy() if features else default_expected_features
_UpperCAmelCase : Optional[int] = (
Features({feature: Value(__lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase : Tuple = TextDatasetReader({"train": text_path} , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read()
_check_text_datasetdict(__lowerCAmelCase , __lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if split:
_UpperCAmelCase : int = {split: text_path}
else:
_UpperCAmelCase : Tuple = "train"
_UpperCAmelCase : List[str] = {"train": text_path, "test": text_path}
_UpperCAmelCase : Optional[Any] = tmp_path / "cache"
_UpperCAmelCase : Optional[int] = {"text": "string"}
_UpperCAmelCase : int = TextDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read()
_check_text_datasetdict(__lowerCAmelCase , __lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 234 | 1 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=6, num_hidden_groups=6, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, num_hidden_groups=self.num_hidden_groups,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': AlbertModel,
            'fill-mask': AlbertForMaskedLM,
            'question-answering': AlbertForQuestionAnswering,
            'text-classification': AlbertForSequenceClassification,
            'token-classification': AlbertForTokenClassification,
            'zero-shot': AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 202 |
"""simple docstring"""
def solution(limit: int = 28_123) -> int:
    """
    Sum of all positive integers <= limit that cannot be written as the sum of two
    abundant numbers (Project Euler problem 23). `sum_divs[n]` accumulates the sum
    of proper divisors of n via a sieve.
    """
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a in abundants) for a in abundants):
            res += n
    return res
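# Sanity check: 24 = 12 + 12 is the smallest integer expressible as a sum of two
# abundant numbers, so it is the first value the loop above excludes. With the
# default limit of 28_123, solution() should return 4179871, the known answer to
# Project Euler problem 23.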
if __name__ == "__main__":
print(solution())
| 202 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])
    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
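# The tests above shell out via torchrun; the __main__ block can also be exercised
# directly on two GPUs (the filename below is illustrative, not part of this repo):
# torchrun --nproc_per_node=2 test_multigpu.py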
| 303 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    """Pipeline for class-conditional image generation with DiT (Diffusion Transformers)."""
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)
        # create an imagenet label -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().lower()] = int(key)
            self.labels = dict(sorted(self.labels.items()))
    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]
@torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels
        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents
        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample
            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred
            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample
        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input
        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            samples = self.numpy_to_pil(samples)
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=samples)
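# Minimal usage sketch. "facebook/DiT-XL-2-256" is the public DiT checkpoint this
# pipeline targets, but any repo exposing transformer/vae/scheduler components
# should work:
# pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
# class_ids = pipe.get_label_ids(["white shark", "umbrella"])
# images = pipe(class_labels=class_ids, num_inference_steps=25).images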
| 303 | 1 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)
                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)
                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)
                self.assertEqual(text_2.replace(" ", ""), output_text)
    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass
    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
| 360 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
lowerCamelCase__ = {"UserAgent": UserAgent().random}
def extract_user_profile(script) -> dict:
    """Pull the user profile dict out of an inline <script> JSON blob."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
    def __init__(self, username: str) -> None:
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()
    def get_json(self) -> dict:
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : List[Any] ) -> str:
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self : str ) -> str:
return F'''{self.fullname} ({self.username}) is {self.biography}'''
    @property
    def username(self) -> str:
        return self.user_data["username"]
    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]
    @property
    def biography(self) -> str:
        return self.user_data["biography"]
    @property
    def email(self) -> str:
        return self.user_data["business_email"]
    @property
    def website(self) -> str:
        return self.user_data["external_url"]
    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]
    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]
    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]
    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]
    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def lowercase__ ( lowercase_ = "github" ) -> None:
"""simple docstring"""
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
_UpperCamelCase : Union[str, Any] = InstagramUser(lowercase_ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data ,lowercase_ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = InstagramUser("github")
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
| 310 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
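# With the lazy structure above, `from transformers.models.gpt_neo import GPTNeoModel`
# defers the heavy torch import until the attribute is first accessed, while a plain
# `import transformers.models.gpt_neo` stays cheap.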
| 251 |
'''simple docstring'''
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Return all primes below `limit` using an odd-only sieve of Eratosthenes."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(ceiling: int = 1_000_000) -> int:
    """Longest sum of consecutive primes below `ceiling` that is itself prime
    (Project Euler problem 50)."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
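# Worked check from the problem statement: below one thousand, 953 is the longest
# sum of consecutive primes (21 terms) that is itself prime, so solution(1_000)
# should return 953.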
if __name__ == "__main__":
print(F"""{solution() = }""")
| 251 | 1 |
"""simple docstring"""
def solution(length: int = 50) -> int:
    """Count the ways a row of `length` units can be filled with red blocks of
    minimum length three separated by at least one black square (Project Euler 114).
    """
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
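# Worked check from the problem statement (Project Euler 114): a row of length 7
# admits exactly 17 arrangements, so solution(7) should return 17.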
if __name__ == "__main__":
print(F"{solution() = }")
| 320 |
"""simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
model_classes = {
    'b0': efficientnet.EfficientNetB0,
    'b1': efficientnet.EfficientNetB1,
    'b2': efficientnet.EfficientNetB2,
    'b3': efficientnet.EfficientNetB3,
    'b4': efficientnet.EfficientNetB4,
    'b5': efficientnet.EfficientNetB5,
    'b6': efficientnet.EfficientNetB6,
    'b7': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'b0': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 224,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 240,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 1408,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 260,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 1536,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 300,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 1792,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 380,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 2048,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 456,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 2304,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 528,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 2560,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 600,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}
    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Copy/paste/tweak the original TF model's weights into our EfficientNet structure."""
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())
    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
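# Example invocation (the script filename is assumed; adjust to this file's name):
# python convert_efficientnet_to_pytorch.py --model_name b0 \
#     --pytorch_dump_folder_path ./hf_efficientnet_b0 --save_model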
| 320 | 1 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
snake_case : Optional[Any] = value
elif weight_type == "weight_g":
snake_case : str = value
elif weight_type == "weight_v":
snake_case : Dict = value
elif weight_type == "bias":
snake_case : Dict = value
else:
snake_case : Tuple = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
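# Example invocation (checkpoint path and script filename are illustrative):
# python convert_wavlm_original_checkpoint.py \
#     --checkpoint_path ./WavLM-Base.pt --pytorch_dump_folder_path ./wavlm-base-hf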
| 59 |
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ['model.decoder.embed_positions.weights']
def rename_keys(name):
    """Map a fairseq MusicGen parameter name to its transformers equivalent."""
if "emb" in name:
A_ : Tuple = name.replace('''emb''' , '''model.decoder.embed_tokens''' )
if "transformer" in name:
A_ : Optional[int] = name.replace('''transformer''' , '''model.decoder''' )
if "cross_attention" in name:
A_ : Optional[Any] = name.replace('''cross_attention''' , '''encoder_attn''' )
if "linear1" in name:
A_ : int = name.replace('''linear1''' , '''fc1''' )
if "linear2" in name:
A_ : Optional[int] = name.replace('''linear2''' , '''fc2''' )
if "norm1" in name:
A_ : Any = name.replace('''norm1''' , '''self_attn_layer_norm''' )
if "norm_cross" in name:
A_ : Any = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
if "norm2" in name:
A_ : Dict = name.replace('''norm2''' , '''final_layer_norm''' )
if "out_norm" in name:
A_ : Tuple = name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
if "linears" in name:
A_ : Union[str, Any] = name.replace('''linears''' , '''lm_heads''' )
if "condition_provider.conditioners.description.output_proj" in name:
A_ : Tuple = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
return name
def rename_state_dict(state_dict, hidden_size):
    """Rename fairseq keys and split the fused qkv projections."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint):
    """Infer the MusicGen decoder config from the checkpoint size."""
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size, ffn_dim=hidden_size * 4, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size )
    text_encoder = T5EncoderModel.from_pretrained('t5-base')
    audio_encoder = EncodecModel.from_pretrained('facebook/encodec_32khz')
    decoder = MusicgenForCausalLM(decoder_config).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
    for key in missing_keys.copy():
        if key.startswith(('text_encoder', 'audio_encoder')) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)
    if len(missing_keys) > 0:
        raise ValueError(f"""Missing key(s) in state_dict: {missing_keys}""" )
    if len(unexpected_keys) > 0:
        raise ValueError(f"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError('Incorrect shape for logits')
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained('t5-base')
    feature_extractor = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz', padding_side='left')
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)
    if repo_id:
        logger.info(f"""Pushing model {checkpoint} to {repo_id}""" )
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the ๐ค hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
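# Example invocation (the output folder and script filename are illustrative):
# python convert_musicgen_transformers.py --checkpoint small --pytorch_dump_folder ./musicgen-small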
| 167 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
_a : Tuple = logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 46 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : int = logging.get_logger(__name__)
_a : List[str] = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : List[str] ="""decision_transformer"""
a : List[Any] =["""past_key_values"""]
a : Dict ={
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self,__SCREAMING_SNAKE_CASE=17,__SCREAMING_SNAKE_CASE=4,__SCREAMING_SNAKE_CASE=1_28,__SCREAMING_SNAKE_CASE=40_96,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=1,__SCREAMING_SNAKE_CASE=10_24,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=1,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="relu",__SCREAMING_SNAKE_CASE=0.1,__SCREAMING_SNAKE_CASE=0.1,__SCREAMING_SNAKE_CASE=0.1,__SCREAMING_SNAKE_CASE=1e-5,__SCREAMING_SNAKE_CASE=0.02,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=5_02_56,__SCREAMING_SNAKE_CASE=5_02_56,__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=False,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = state_dim
__lowerCAmelCase = act_dim
__lowerCAmelCase = hidden_size
__lowerCAmelCase = max_ep_len
__lowerCAmelCase = action_tanh
__lowerCAmelCase = vocab_size
__lowerCAmelCase = n_positions
__lowerCAmelCase = n_layer
__lowerCAmelCase = n_head
__lowerCAmelCase = n_inner
__lowerCAmelCase = activation_function
__lowerCAmelCase = resid_pdrop
__lowerCAmelCase = embd_pdrop
__lowerCAmelCase = attn_pdrop
__lowerCAmelCase = layer_norm_epsilon
__lowerCAmelCase = initializer_range
__lowerCAmelCase = scale_attn_weights
__lowerCAmelCase = use_cache
__lowerCAmelCase = scale_attn_by_inverse_layer_idx
__lowerCAmelCase = reorder_and_upcast_attn
__lowerCAmelCase = bos_token_id
__lowerCAmelCase = eos_token_id
super().__init__(bos_token_id=__SCREAMING_SNAKE_CASE,eos_token_id=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
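# Hedged usage sketch: the class above mirrors transformers'
# DecisionTransformerConfig, whose `attribute_map` (defined above) aliases the
# canonical attribute names to the GPT-2 style fields, so both spellings
# resolve to the same values (defaults shown match the signature above).
from transformers import DecisionTransformerConfig

_demo_cfg = DecisionTransformerConfig(state_dim=17, act_dim=4)
assert _demo_cfg.num_hidden_layers == _demo_cfg.n_layer == 3
assert _demo_cfg.num_attention_heads == _demo_cfg.n_head == 1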
| 46 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
UpperCAmelCase__ = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) -> Dict:
"""simple docstring"""
for attribute in key.split('''.''' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
_lowercase ='''lm_head'''
_lowercase =getattr(__snake_case , __snake_case )
if weight_type is not None:
_lowercase =getattr(__snake_case , __snake_case ).shape
else:
_lowercase =hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
_lowercase =value
elif weight_type == "weight_g":
_lowercase =value
elif weight_type == "weight_v":
_lowercase =value
elif weight_type == "bias":
_lowercase =value
else:
_lowercase =value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case ) -> List[Any]:
"""simple docstring"""
_lowercase =[]
_lowercase =fairseq_model.state_dict()
_lowercase =hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
_lowercase =False
if "conv_layers" in name:
load_conv_layer(
__snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == '''group''' , )
_lowercase =True
else:
for key, mapped_key in MAPPING.items():
_lowercase ='''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_lowercase =True
if "*" in mapped_key:
_lowercase =name.split(__snake_case )[0].split('''.''' )[-2]
_lowercase =mapped_key.replace('''*''' , __snake_case )
if "weight_g" in name:
_lowercase ='''weight_g'''
elif "weight_v" in name:
_lowercase ='''weight_v'''
elif "bias" in name:
_lowercase ='''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowercase ='''weight'''
else:
_lowercase =None
set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(F"Unused weights: {unused_weights}" )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) -> Optional[int]:
"""simple docstring"""
_lowercase =full_name.split('''conv_layers.''' )[-1]
_lowercase =name.split('''.''' )
_lowercase =int(items[0] )
_lowercase =int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
_lowercase =value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
_lowercase =value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
_lowercase =value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
_lowercase =value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(__snake_case )
@torch.no_grad()
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case=None , __snake_case=None , __snake_case=True ) -> List[Any]:
"""simple docstring"""
if config_path is not None:
_lowercase =UniSpeechConfig.from_pretrained(__snake_case )
else:
_lowercase =UniSpeechConfig()
if is_finetuned:
if dict_path:
_lowercase =Dictionary.load_from_json(__snake_case )
            # important: change the bos & pad token ids, since the CTC symbol is
            # <pad> and not <s> as in fairseq
_lowercase =target_dict.pad_index
_lowercase =target_dict.bos_index
_lowercase =target_dict.eos_index
_lowercase =len(target_dict.symbols )
_lowercase =os.path.join(__snake_case , '''vocab.json''' )
if not os.path.isdir(__snake_case ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__snake_case ) )
return
os.makedirs(__snake_case , exist_ok=__snake_case )
_lowercase =target_dict.indices
# fairseq has the <pad> and <s> switched
_lowercase =42
_lowercase =43
with open(__snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(__snake_case , __snake_case )
_lowercase =WavaVecaPhonemeCTCTokenizer(
__snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__snake_case , )
_lowercase =True if config.feat_extract_norm == '''layer''' else False
_lowercase =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__snake_case , return_attention_mask=__snake_case , )
_lowercase =WavaVecaProcessor(feature_extractor=__snake_case , tokenizer=__snake_case )
processor.save_pretrained(__snake_case )
_lowercase =UniSpeechForCTC(__snake_case )
else:
_lowercase =UniSpeechForPreTraining(__snake_case )
if is_finetuned:
_lowercase , _lowercase , _lowercase =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
else:
_lowercase , _lowercase , _lowercase =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_lowercase =model[0].eval()
recursively_load_weights(__snake_case , __snake_case , __snake_case )
hf_unispeech.save_pretrained(__snake_case )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
UpperCAmelCase__ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
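# Example invocation (hedged; the script filename and all paths below are
# hypothetical). Pass --not_finetuned and omit --dict_path when converting a
# pretraining-only checkpoint:
#
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path ./unispeech.pt \
#       --pytorch_dump_folder_path ./unispeech-hf \
#       --dict_path ./dict.ltr.txt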
| 5 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowerCamelCase__ ( a ) -> int:
_A: int = filter(lambda a : p.requires_grad , model.parameters() )
_A: Dict = sum([np.prod(p.size() ) for p in model_parameters] )
return params
UpperCAmelCase__ : str = logging.getLogger(__name__)
def lowerCamelCase__ ( a , a ) -> Dict:
if metric == "rouge2":
_A: Tuple = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
_A: List[str] = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
_A: str = '''{val_avg_em:.4f}-{step_count}'''
else:
raise NotImplementedError(
f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
''' function.''' )
_A: List[str] = ModelCheckpoint(
dirpath=a , filename=a , monitor=f"""val_{metric}""" , mode='''max''' , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def lowerCamelCase__ ( a , a ) -> Optional[Any]:
return EarlyStopping(
monitor=f"""val_{metric}""" , mode='''min''' if '''loss''' in metric else '''max''' , patience=a , verbose=a , )
class UpperCAmelCase ( pl.Callback ):
'''simple docstring'''
def __magic_name__ ( self : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict ):
"""simple docstring"""
_A: Union[str, Any] = {F"""lr_group_{i}""": param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(lowerCAmelCase_ )
@rank_zero_only
def __magic_name__ ( self : Optional[Any] , lowerCAmelCase_ : pl.Trainer , lowerCAmelCase_ : pl.LightningModule , lowerCAmelCase_ : str , lowerCAmelCase_ : Any=True ):
"""simple docstring"""
logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
_A: Tuple = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
_A: Tuple = Path(pl_module.hparams.output_dir )
if type_path == "test":
_A: List[str] = od / '''test_results.txt'''
_A: Optional[int] = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_A: Any = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
_A: Dict = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=lowerCAmelCase_ )
generations_file.parent.mkdir(exist_ok=lowerCAmelCase_ )
with open(lowerCAmelCase_ , '''a+''' ) as writer:
for key in sorted(lowerCAmelCase_ ):
if key in ["log", "progress_bar", "preds"]:
continue
_A: Optional[int] = metrics[key]
if isinstance(lowerCAmelCase_ , torch.Tensor ):
_A: List[str] = val.item()
_A: List[Any] = F"""{key}: {val:.6f}\n"""
writer.write(lowerCAmelCase_ )
if not save_generations:
return
if "preds" in metrics:
_A: Optional[int] = '''\n'''.join(metrics['''preds'''] )
generations_file.open('''w+''' ).write(lowerCAmelCase_ )
@rank_zero_only
def __magic_name__ ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
try:
_A: Union[str, Any] = pl_module.model.model.num_parameters()
except AttributeError:
_A: Any = pl_module.model.num_parameters()
_A: Optional[int] = count_trainable_parameters(lowerCAmelCase_ )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} )
@rank_zero_only
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : pl.Trainer , lowerCAmelCase_ : pl.LightningModule ):
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(lowerCAmelCase_ , lowerCAmelCase_ , '''test''' )
@rank_zero_only
def __magic_name__ ( self : Union[str, Any] , lowerCAmelCase_ : pl.Trainer , lowerCAmelCase_ : Optional[Any] ):
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
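# Hedged wiring sketch: in the upstream examples the two factory functions
# above are named get_checkpoint_callback / get_early_stopping_callback and
# the callback class is Seq2SeqLoggingCallback; with those names they plug
# into a trainer roughly as:
#
#   checkpoint_cb = get_checkpoint_callback(output_dir, metric='rouge2')
#   early_stop_cb = get_early_stopping_callback(metric='rouge2', patience=3)
#   trainer = pl.Trainer(callbacks=[Seq2SeqLoggingCallback(), checkpoint_cb, early_stop_cb])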
| 121 | 0 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def UpperCAmelCase ( data ) -> tuple:
"""simple docstring"""
return (data["data"], data["target"])
def UpperCAmelCase ( a_ , a_ ) -> XGBClassifier:
"""simple docstring"""
A_ : List[Any] = XGBClassifier()
classifier.fit(_lowercase , _lowercase )
return classifier
def UpperCAmelCase ( ) -> None:
"""simple docstring"""
A_ : List[str] = load_iris()
A_ : Optional[int] = data_handling(_lowercase )
A_ : Tuple = train_test_split(
_lowercase , _lowercase , test_size=0.25 )
A_ : List[Any] = iris["target_names"]
# Create an XGBoost Classifier from the training data
A_ : Dict = xgboost(_lowercase , _lowercase )
    # Display the confusion matrix of the classifier on the held-out test set
ConfusionMatrixDisplay.from_estimator(
_lowercase , _lowercase , _lowercase , display_labels=_lowercase , cmap="""Blues""" , normalize="""true""" , )
plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 363 |
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=64 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=64 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=None , ) -> Union[str, Any]:
A_ : Tuple = parent
A_ : Optional[Any] = batch_size
A_ : Optional[Any] = seq_length
A_ : List[str] = is_training
A_ : str = use_input_mask
A_ : List[str] = use_token_type_ids
A_ : Tuple = use_labels
A_ : List[str] = vocab_size
A_ : Optional[Any] = hidden_size
A_ : Any = num_hidden_layers
A_ : Optional[int] = num_attention_heads
A_ : Optional[int] = intermediate_size
A_ : str = hidden_act
A_ : Union[str, Any] = hidden_dropout_prob
A_ : int = attention_probs_dropout_prob
A_ : Dict = max_position_embeddings
A_ : Dict = type_vocab_size
A_ : Any = type_sequence_label_size
A_ : Optional[int] = initializer_range
A_ : int = num_labels
A_ : int = num_choices
A_ : Optional[int] = scope
def UpperCAmelCase_ ( self ) -> Optional[int]:
return MPNetConfig.from_pretrained("""microsoft/mpnet-base""" )
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ : Dict = None
if self.use_input_mask:
A_ : int = random_attention_mask([self.batch_size, self.seq_length] )
A_ : Tuple = None
A_ : Optional[int] = None
A_ : Union[str, Any] = None
if self.use_labels:
A_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
A_ : int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self ) -> int:
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
A_ : int = MPNetModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[Any] = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
A_ : str = MPNetForQuestionAnswering(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Optional[int] = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , start_positions=_lowerCamelCase , end_positions=_lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
A_ : Tuple = self.num_labels
A_ : List[Any] = MPNetForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : str = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
A_ : int = self.num_choices
A_ : Dict = MPNetForMultipleChoice(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A_ : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A_ : str = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
A_ : Optional[int] = self.num_labels
A_ : Tuple = MPNetForTokenClassification(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A_ : Union[str, Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self ) -> str:
A_ : int = self.prepare_config_and_inputs()
((A_) , (A_) , (A_) , (A_) , (A_) , (A_)) : Any = config_and_inputs
A_ : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __A, __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
lowerCamelCase = (
{
'''feature-extraction''': MPNetModel,
'''fill-mask''': MPNetForMaskedLM,
'''question-answering''': MPNetForQuestionAnswering,
'''text-classification''': MPNetForSequenceClassification,
'''token-classification''': MPNetForTokenClassification,
'''zero-shot''': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = True
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Dict = MPNetModelTester(self )
A_ : int = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 )
def UpperCAmelCase_ ( self ) -> int:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ) -> Any:
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> int:
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*_lowerCamelCase )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : int = MPNetModel.from_pretrained("""microsoft/mpnet-base""" )
A_ : int = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
A_ : Tuple = model(_lowerCamelCase )[0]
A_ : int = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _lowerCamelCase )
A_ : Any = torch.tensor(
[[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowerCamelCase , atol=1e-4 ) )
| 164 | 0 |
'''simple docstring'''
import argparse
import struct
import unittest
class __UpperCamelCase :
def __init__( self , __a ):
'''simple docstring'''
__a : Optional[Any] = data
# Initialize hash values
__a : Optional[int] = [
0x6A_09_E6_67,
0xBB_67_AE_85,
0x3C_6E_F3_72,
0xA5_4F_F5_3A,
0x51_0E_52_7F,
0x9B_05_68_8C,
0x1F_83_D9_AB,
0x5B_E0_CD_19,
]
# Initialize round constants
__a : Union[str, Any] = [
0x42_8A_2F_98,
0x71_37_44_91,
0xB5_C0_FB_CF,
0xE9_B5_DB_A5,
0x39_56_C2_5B,
0x59_F1_11_F1,
0x92_3F_82_A4,
0xAB_1C_5E_D5,
0xD8_07_AA_98,
0x12_83_5B_01,
0x24_31_85_BE,
0x55_0C_7D_C3,
0x72_BE_5D_74,
0x80_DE_B1_FE,
0x9B_DC_06_A7,
0xC1_9B_F1_74,
0xE4_9B_69_C1,
0xEF_BE_47_86,
0x0F_C1_9D_C6,
0x24_0C_A1_CC,
0x2D_E9_2C_6F,
0x4A_74_84_AA,
0x5C_B0_A9_DC,
0x76_F9_88_DA,
0x98_3E_51_52,
0xA8_31_C6_6D,
0xB0_03_27_C8,
0xBF_59_7F_C7,
0xC6_E0_0B_F3,
0xD5_A7_91_47,
0x06_CA_63_51,
0x14_29_29_67,
0x27_B7_0A_85,
0x2E_1B_21_38,
0x4D_2C_6D_FC,
0x53_38_0D_13,
0x65_0A_73_54,
0x76_6A_0A_BB,
0x81_C2_C9_2E,
0x92_72_2C_85,
0xA2_BF_E8_A1,
0xA8_1A_66_4B,
0xC2_4B_8B_70,
0xC7_6C_51_A3,
0xD1_92_E8_19,
0xD6_99_06_24,
0xF4_0E_35_85,
0x10_6A_A0_70,
0x19_A4_C1_16,
0x1E_37_6C_08,
0x27_48_77_4C,
0x34_B0_BC_B5,
0x39_1C_0C_B3,
0x4E_D8_AA_4A,
0x5B_9C_CA_4F,
0x68_2E_6F_F3,
0x74_8F_82_EE,
0x78_A5_63_6F,
0x84_C8_78_14,
0x8C_C7_02_08,
0x90_BE_FF_FA,
0xA4_50_6C_EB,
0xBE_F9_A3_F7,
0xC6_71_78_F2,
]
__a : Optional[int] = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def __UpperCAmelCase ( __a ):
'''simple docstring'''
__a : Optional[int] = b'\x80' + (b'\x00' * (63 - (len(__a ) + 8) % 64))
__a : Dict = struct.pack('>Q' , (len(__a ) * 8) )
return data + padding + big_endian_integer
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
__a : Dict = list(struct.unpack('>16L' , __a ) )
            # extend the message schedule with 48 zeroed words (computed below)
words += [0] * 48
__a : Tuple = self.hashes
for index in range(0 , 64 ):
if index > 15:
                    # compute the remaining message-schedule words in place
__a : str = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
__a : Union[str, Any] = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
__a : int = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_00_00_00_00
# Compression
__a : Tuple = self.ror(__a , 6 ) ^ self.ror(__a , 11 ) ^ self.ror(__a , 25 )
__a : List[Any] = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g)
__a : str = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_00_00_00_00
__a : List[str] = self.ror(__a , 2 ) ^ self.ror(__a , 13 ) ^ self.ror(__a , 22 )
__a : List[Any] = (a & b) ^ (a & c) ^ (b & c)
__a : int = (sa + maj) % 0x1_00_00_00_00
__a : Union[str, Any] = (
g,
f,
e,
((d + tempa) % 0x1_00_00_00_00),
c,
b,
a,
((tempa + tempa) % 0x1_00_00_00_00),
)
__a : Any = [a, b, c, d, e, f, g, h]
# Modify final values
__a : str = [
((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
for index, element in enumerate(self.hashes )
]
__a : Optional[Any] = ''.join([hex(__a )[2:].zfill(8 ) for value in self.hashes] )
def __UpperCAmelCase ( self , __a , __a ):
'''simple docstring'''
return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)
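# Hedged mini-demo of the 32-bit right-rotation implemented just above:
# rotating 0x00000001 right by one wraps the low bit around to the top.
assert (0xFF_FF_FF_FF & (0x1 << (32 - 1)) | (0x1 >> 1)) == 0x80_00_00_00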
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
import hashlib
__a : Dict = bytes('Test String' , 'utf-8' )
self.assertEqual(SHAaaa(__a ).hash , hashlib.shaaaa(__a ).hexdigest() )
def lowerCamelCase ():
import doctest
doctest.testmod()
__a : Any = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
__a : Any = parser.parse_args()
__a : int = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
__a : Optional[Any] = f.read()
else:
__a : Optional[int] = bytes(SCREAMING_SNAKE_CASE_ , 'utf-8' )
print(SHAaaa(SCREAMING_SNAKE_CASE_ ).hash )
if __name__ == "__main__":
main()
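# Hedged cross-check mirroring the unit test above: the pure-Python class is
# meant to agree with hashlib's SHA-256. The NIST 'abc' test vector:
import hashlib as _hashlib

assert _hashlib.sha256(b'abc').hexdigest() == (
    'ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad'
)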
| 27 |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__( self : str , a : Optional[Any] , a : int=13 , a : str=7 , a : str=True , a : List[str]=True , a : Optional[Any]=True , a : int=True , a : List[Any]=99 , a : List[Any]=32 , a : Tuple=5 , a : Any=4 , a : Optional[int]=37 , a : Tuple="gelu" , a : Any=0.1 , a : int=0.1 , a : List[Any]=128 , a : Union[str, Any]=32 , a : Union[str, Any]=16 , a : Dict=2 , a : List[Any]=0.0_2 , a : Optional[Any]=3 , a : List[Any]=4 , a : Optional[int]=None , ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = parent
lowerCAmelCase__ : Dict = batch_size
lowerCAmelCase__ : Optional[Any] = seq_length
lowerCAmelCase__ : Optional[Any] = is_training
lowerCAmelCase__ : Union[str, Any] = use_input_mask
lowerCAmelCase__ : List[Any] = use_token_type_ids
lowerCAmelCase__ : str = use_labels
lowerCAmelCase__ : Optional[Any] = vocab_size
lowerCAmelCase__ : Union[str, Any] = hidden_size
lowerCAmelCase__ : Optional[int] = num_hidden_layers
lowerCAmelCase__ : Optional[int] = num_attention_heads
lowerCAmelCase__ : List[Any] = intermediate_size
lowerCAmelCase__ : List[str] = hidden_act
lowerCAmelCase__ : List[Any] = hidden_dropout_prob
lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase__ : Dict = max_position_embeddings
lowerCAmelCase__ : Any = type_vocab_size
lowerCAmelCase__ : Any = type_sequence_label_size
lowerCAmelCase__ : List[Any] = initializer_range
lowerCAmelCase__ : Dict = num_labels
lowerCAmelCase__ : Any = num_choices
lowerCAmelCase__ : Union[str, Any] = scope
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : Tuple = None
if self.use_input_mask:
lowerCAmelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : Tuple = None
if self.use_token_type_ids:
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ : Optional[int] = None
lowerCAmelCase__ : Optional[Any] = None
lowerCAmelCase__ : Optional[int] = None
if self.use_labels:
lowerCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ : Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) : List[Any] = self.prepare_config_and_inputs()
lowerCAmelCase__ : List[Any] = True
lowerCAmelCase__ : Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _lowerCamelCase ( self : Optional[Any] , a : Optional[int] , a : Tuple , a : Optional[int] , a : List[Any] , a : Tuple , a : List[str] , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = NezhaModel(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = model(a , attention_mask=a , token_type_ids=a )
lowerCAmelCase__ : List[str] = model(a , token_type_ids=a )
lowerCAmelCase__ : Any = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowerCamelCase ( self : List[Any] , a : Union[str, Any] , a : Dict , a : List[Any] , a : Optional[Any] , a : int , a : Tuple , a : List[Any] , a : Tuple , a : List[str] , ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = True
lowerCAmelCase__ : Optional[int] = NezhaModel(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Any = model(
a , attention_mask=a , token_type_ids=a , encoder_hidden_states=a , encoder_attention_mask=a , )
lowerCAmelCase__ : Dict = model(
a , attention_mask=a , token_type_ids=a , encoder_hidden_states=a , )
lowerCAmelCase__ : List[str] = model(a , attention_mask=a , token_type_ids=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowerCamelCase ( self : Tuple , a : Optional[Any] , a : List[Any] , a : str , a : List[str] , a : Tuple , a : List[Any] , a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = NezhaForMaskedLM(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = model(a , attention_mask=a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : List[Any] , a : Optional[int] , a : List[Any] , a : int , a : List[str] , a : Union[str, Any] , a : int , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = NezhaForNextSentencePrediction(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : str = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _lowerCamelCase ( self : int , a : Optional[int] , a : str , a : List[str] , a : int , a : Dict , a : Optional[Any] , a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = NezhaForPreTraining(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[int] = model(
a , attention_mask=a , token_type_ids=a , labels=a , next_sentence_label=a , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _lowerCamelCase ( self : Union[str, Any] , a : Dict , a : List[str] , a : Any , a : Any , a : Union[str, Any] , a : Tuple , a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = NezhaForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Tuple = model(
a , attention_mask=a , token_type_ids=a , start_positions=a , end_positions=a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : Tuple , a : str , a : Union[str, Any] , a : Tuple , a : Optional[Any] , a : Dict , a : str , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.num_labels
lowerCAmelCase__ : Optional[Any] = NezhaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Tuple = model(a , attention_mask=a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self : List[str] , a : Dict , a : str , a : Optional[Any] , a : Optional[int] , a : List[str] , a : Dict , a : str ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.num_labels
lowerCAmelCase__ : str = NezhaForTokenClassification(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Any = model(a , attention_mask=a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : int , a : Tuple , a : List[Any] , a : Tuple , a : List[Any] , a : Optional[int] , a : Optional[int] , a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.num_choices
lowerCAmelCase__ : Any = NezhaForMultipleChoice(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : str = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : Any = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : int = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) : Any = config_and_inputs
lowerCAmelCase__ : str = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = True
def _lowerCamelCase ( self : str , a : Tuple , a : int , a : Dict=False ):
'''simple docstring'''
lowerCAmelCase__ : int = super()._prepare_for_class(a , a , return_labels=a )
if return_labels:
if model_class in get_values(a ):
lowerCAmelCase__ : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=a )
lowerCAmelCase__ : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a )
return inputs_dict
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = NezhaModelTester(self )
lowerCAmelCase__ : Optional[int] = ConfigTester(self , config_class=a , hidden_size=37 )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*a )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) : str = self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCAmelCase__ : str = None
self.model_tester.create_and_check_model_as_decoder(
a , a , a , a , a , a , a , a , a , )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a )
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : Optional[Any] = NezhaModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
lowerCAmelCase__ : Dict = True
lowerCAmelCase__ : Any = model_class(config=a )
lowerCAmelCase__ : Union[str, Any] = self._prepare_for_class(a , a )
lowerCAmelCase__ : int = torch.jit.trace(
a , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , 'bert.pt' ) )
lowerCAmelCase__ : Any = torch.jit.load(os.path.join(a , 'bert.pt' ) , map_location=a )
loaded(inputs_dict['input_ids'].to(a ) , inputs_dict['attention_mask'].to(a ) )
@require_torch
class A__ ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : str = NezhaModel.from_pretrained('sijunhe/nezha-cn-base' )
lowerCAmelCase__ : Any = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase__ : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCAmelCase__ : Optional[int] = model(a , attention_mask=a )[0]
lowerCAmelCase__ : Union[str, Any] = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , a )
lowerCAmelCase__ : Optional[int] = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1E-4 ) )
@slow
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = NezhaForMaskedLM.from_pretrained('sijunhe/nezha-cn-base' )
lowerCAmelCase__ : Optional[int] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase__ : Optional[int] = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCAmelCase__ : Optional[int] = model(a , attention_mask=a )[0]
lowerCAmelCase__ : int = torch.Size((1, 6, 21_128) )
self.assertEqual(output.shape , a )
lowerCAmelCase__ : List[Any] = torch.tensor(
[[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1E-4 ) )
| 212 | 0 |
def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Dict:
'''simple docstring'''
if index == r:
for j in range(__lowerCAmelCase ):
print(data[j] , end=''' ''' )
print(''' ''' )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
UpperCAmelCase : Optional[Any] =arr[i]
combination_util(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , index + 1 , __lowerCAmelCase , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Any:
'''simple docstring'''
UpperCAmelCase : str =[0] * r
# Print all combination using temporary array 'data[]'
combination_util(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , 0 , __lowerCAmelCase , 0 )
if __name__ == "__main__":
# Driver code to check the function above
__snake_case = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
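# Hedged cross-check: the recursive routine above enumerates the same r-sized
# subsets, in the same lexicographic order, as itertools.combinations.
from itertools import combinations

_combos = list(combinations([10, 20, 30, 40, 50], 3))
assert _combos[0] == (10, 20, 30) and len(_combos) == 10  # C(5, 3) = 10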
| 78 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : List[Any] = KandinskyVaaControlnetPipeline
__lowerCamelCase : int = ["""image_embeds""", """negative_image_embeds""", """hint"""]
__lowerCamelCase : Optional[int] = ["""image_embeds""", """negative_image_embeds""", """hint"""]
__lowerCamelCase : Optional[Any] = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
__lowerCamelCase : Dict = False
@property
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
return 32
@property
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
return self.time_input_dim
@property
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
return 100
@property
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase : Any ={
'''in_channels''': 8,
            # out_channels is double in_channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
UpperCAmelCase : List[Any] =UNetaDConditionModel(**snake_case__ )
return model
@property
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase : Any =VQModel(**self.dummy_movq_kwargs )
return model
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : List[str] =self.dummy_unet
UpperCAmelCase : Tuple =self.dummy_movq
UpperCAmelCase : Union[str, Any] =DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=snake_case__ , )
UpperCAmelCase : Tuple ={
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def UpperCAmelCase__ ( self , snake_case__ , snake_case__=0 ) -> Any:
'''simple docstring'''
UpperCAmelCase : str =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
UpperCAmelCase : Tuple =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
snake_case__ )
# create hint
UpperCAmelCase : Tuple =floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
if str(snake_case__ ).startswith('''mps''' ):
UpperCAmelCase : Optional[int] =torch.manual_seed(snake_case__ )
else:
UpperCAmelCase : int =torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
UpperCAmelCase : List[str] ={
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : List[Any] ='''cpu'''
UpperCAmelCase : List[Any] =self.get_dummy_components()
UpperCAmelCase : Tuple =self.pipeline_class(**snake_case__ )
UpperCAmelCase : Tuple =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase : Optional[int] =pipe(**self.get_dummy_inputs(snake_case__ ) )
UpperCAmelCase : str =output.images
UpperCAmelCase : List[str] =pipe(
**self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0]
UpperCAmelCase : Union[str, Any] =image[0, -3:, -3:, -1]
UpperCAmelCase : List[str] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : Union[str, Any] =np.array(
[0.695_9826, 0.86_8279, 0.755_8092, 0.6876_9467, 0.8580_5804, 0.6597_7496, 0.4488_5302, 0.595_9111, 0.425_1595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy''' )
UpperCAmelCase : Tuple =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
UpperCAmelCase : int =torch.from_numpy(np.array(snake_case__ ) ).float() / 255.0
UpperCAmelCase : List[str] =hint.permute(2 , 0 , 1 ).unsqueeze(0 )
UpperCAmelCase : Dict =KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(snake_case__ )
UpperCAmelCase : int =KandinskyVaaControlnetPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
UpperCAmelCase : str =pipeline.to(snake_case__ )
pipeline.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase : int ='''A robot, 4k photo'''
UpperCAmelCase : int =torch.Generator(device='''cuda''' ).manual_seed(0 )
UpperCAmelCase , UpperCAmelCase : List[str] =pipe_prior(
snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
UpperCAmelCase : List[str] =torch.Generator(device='''cuda''' ).manual_seed(0 )
UpperCAmelCase : Dict =pipeline(
image_embeds=snake_case__ , negative_image_embeds=snake_case__ , hint=snake_case__ , generator=snake_case__ , num_inference_steps=100 , output_type='''np''' , )
UpperCAmelCase : List[Any] =output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
| 78 | 1 |
import logging
import os
from .state import PartialState
class _a ( logging.LoggerAdapter ):
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: Tuple ) -> Optional[int]:
"""simple docstring"""
lowercase__ = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: List[Any] , *UpperCamelCase_: Dict , **UpperCamelCase_: Any ) -> Union[str, Any]:
"""simple docstring"""
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
lowercase__ = kwargs.pop('''main_process_only''' , UpperCamelCase_ )
lowercase__ = kwargs.pop('''in_order''' , UpperCamelCase_ )
if self.isEnabledFor(UpperCamelCase_ ):
if self._should_log(UpperCamelCase_ ):
lowercase__ , lowercase__ = self.process(UpperCamelCase_ , UpperCamelCase_ )
self.logger.log(UpperCamelCase_ , UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ )
elif in_order:
lowercase__ = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
lowercase__ , lowercase__ = self.process(UpperCamelCase_ , UpperCamelCase_ )
self.logger.log(UpperCamelCase_ , UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ )
state.wait_for_everyone()
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if log_level is None:
lowercase__ = os.environ.get('''ACCELERATE_LOG_LEVEL''' , SCREAMING_SNAKE_CASE )
lowercase__ = logging.getLogger(SCREAMING_SNAKE_CASE )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(SCREAMING_SNAKE_CASE , {} )
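# Hedged usage sketch: upstream this factory is accelerate.logging.get_logger;
# inside a launched script it is used roughly as below (an Accelerator or
# PartialState must have been initialized first, as the adapter enforces):
#
#   logger = get_logger(__name__, log_level='INFO')
#   logger.info('printed once, on the main process only')
#   logger.info('printed on every process, in rank order', main_process_only=False, in_order=True)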
| 110 |
import socket


def main():
    """Connect to a file server, greet it, and save the streamed file locally."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
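# A minimal companion server sketch (an assumption, not part of the original file:
# the client above implies a server that accepts one connection, reads the greeting,
# and streams a file back; the filename and port below are illustrative):
#
#     import socket
#
#     def serve(filename="file_to_send", port=12312):
#         srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#         srv.bind((socket.gethostname(), port))
#         srv.listen(1)
#         conn, _ = srv.accept()
#         conn.recv(1024)  # consume the client's greeting
#         with open(filename, "rb") as in_file:
#             while chunk := in_file.read(1024):
#                 conn.send(chunk)
#         conn.close()
#         srv.close()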
| 110 | 1 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)

    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["โThis", "โis", "โa", "โt", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsรฉ.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"รฉ",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsรฉ."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
17_9459,
12_4850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
1_0114,
711,
152,
20,
6,
5,
2_2376,
642,
1221,
1_5190,
3_4153,
450,
5608,
959,
1119,
5_7702,
136,
186,
47,
1098,
2_9367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
5_0901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def _A ( self : Optional[Any] ):
# fmt: off
SCREAMING_SNAKE_CASE : Tuple = {"input_ids": [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name="xlm-roberta-base" , revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3" , )
| 319 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
snake_case = """3"""
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 319 | 1 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
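# For example, get_pairs(("l", "o", "w")) returns {("l", "o"), ("o", "w")}; these are
# the candidate merges that the `bpe` method below ranks and collapses.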
class PhobertTokenizer(PreTrainedTokenizer):
    """Construct a PhoBERT tokenizer, using Byte-Pair Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs by adding <s>...</s> (and </s></s> between sequence pairs)."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """PhoBERT does not use token type ids, so this returns a list of zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE pieces."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens back into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file (one "<token> <count>" per line)."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
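# Hedged usage sketch (the local vocab/merges paths are illustrative; in practice
# you would load the published "vinai/phobert-base" checkpoint via `from_pretrained`):
#
#     tokenizer = PhobertTokenizer("vocab.txt", "bpe.codes")
#     tokenizer.tokenize("Tรดi lร  sinh_viรชn")  # BPE pieces; continuations are marked with "@@"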
| 237 |
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Total of each resource currently allocated across all processes."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still available: claim vector minus allocated totals."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Per-process remaining need: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
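# Hedged usage sketch, running the safety check on the module-level test tables
# above (passing `describe=True` triggers the pretty-printed tables before the run):
#
#     BankersAlgorithm(
#         test_claim_vector, test_allocated_res_table, test_maximum_claim_table
#     ).main(describe=True)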
| 237 | 1 |
"""simple docstring"""
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir=None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path):
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path, force_extract):
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path, force_extract=False):
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path, **kwargs):
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path, output_path):
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path, magic_number_length):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path, magic_number=b""):
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path, **kwargs):
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        """
        Filter out archive members whose resolved paths would escape
        ``output_path`` (directory traversal or unsafe links).
        """

        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path, output_path):
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path, output_path):
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path, magic_number=b""):
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path, output_path):
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path, output_path):
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID, RAR5_ID

    @staticmethod
    def extract(input_path, output_path):
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path, output_path):
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path, output_path):
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path, output_path):
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path, output_path):
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path, magic_number_length):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path, return_extractor=False):
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path):  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(cls, input_path, output_path, extractor_format=None, extractor="deprecated"):
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
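# Hedged usage sketch (this is an internal `datasets` utility, not a public API;
# the cache directory and archive path below are illustrative):
#
#     manager = ExtractManager(cache_dir="/tmp/datasets_cache")
#     extracted_path = manager.extract("archive.tar.gz")
#     # `extract` returns the input path unchanged when no archive format is inferred.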
| 370 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Any = "unispeech-sat"
def __init__( self , _UpperCAmelCase=32 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-5 , _UpperCAmelCase="group" , _UpperCAmelCase="gelu" , _UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , _UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , _UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , _UpperCAmelCase=False , _UpperCAmelCase=128 , _UpperCAmelCase=16 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=0.05 , _UpperCAmelCase=10 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=10 , _UpperCAmelCase=0 , _UpperCAmelCase=320 , _UpperCAmelCase=2 , _UpperCAmelCase=0.1 , _UpperCAmelCase=100 , _UpperCAmelCase=256 , _UpperCAmelCase=256 , _UpperCAmelCase=0.1 , _UpperCAmelCase="mean" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=256 , _UpperCAmelCase=(512, 512, 512, 512, 1500) , _UpperCAmelCase=(5, 3, 3, 1, 1) , _UpperCAmelCase=(1, 2, 3, 1, 1) , _UpperCAmelCase=512 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=504 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase )
lowercase__: Union[str, Any] = hidden_size
lowercase__: Union[str, Any] = feat_extract_norm
lowercase__: Any = feat_extract_activation
lowercase__: List[Any] = list(_UpperCAmelCase )
lowercase__: Optional[int] = list(_UpperCAmelCase )
lowercase__: int = list(_UpperCAmelCase )
lowercase__: Any = conv_bias
lowercase__: List[str] = num_conv_pos_embeddings
lowercase__: List[str] = num_conv_pos_embedding_groups
lowercase__: int = len(self.conv_dim )
lowercase__: Dict = num_hidden_layers
lowercase__: List[Any] = intermediate_size
lowercase__: Dict = hidden_act
lowercase__: Optional[Any] = num_attention_heads
lowercase__: Union[str, Any] = hidden_dropout
lowercase__: List[Any] = attention_dropout
lowercase__: str = activation_dropout
lowercase__: Optional[Any] = feat_proj_dropout
lowercase__: Optional[int] = final_dropout
lowercase__: Any = layerdrop
lowercase__: int = layer_norm_eps
lowercase__: Any = initializer_range
lowercase__: Union[str, Any] = vocab_size
lowercase__: Optional[Any] = num_clusters
lowercase__: Dict = do_stable_layer_norm
lowercase__: List[str] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase__: Dict = apply_spec_augment
lowercase__: Union[str, Any] = mask_time_prob
lowercase__: List[str] = mask_time_length
lowercase__: Union[str, Any] = mask_time_min_masks
lowercase__: str = mask_feature_prob
lowercase__: Dict = mask_feature_length
lowercase__: List[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowercase__: Tuple = num_codevectors_per_group
lowercase__: Optional[Any] = num_codevector_groups
lowercase__: int = contrastive_logits_temperature
lowercase__: Any = feat_quantizer_dropout
lowercase__: int = num_negatives
lowercase__: Optional[Any] = codevector_dim
lowercase__: int = proj_codevector_dim
lowercase__: str = diversity_loss_weight
# ctc loss
lowercase__: int = ctc_loss_reduction
lowercase__: Union[str, Any] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowercase__: Optional[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowercase__: Union[str, Any] = list(_UpperCAmelCase )
lowercase__: Tuple = list(_UpperCAmelCase )
lowercase__: Union[str, Any] = list(_UpperCAmelCase )
lowercase__: Tuple = xvector_output_dim
@property
def _snake_case ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 2 | 0 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}

PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    """Fast BPE tokenizer for HerBERT, backed by HuggingFace's `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
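# Hedged usage sketch (the checkpoint name comes from the vocabulary map above):
#
#     tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
#     tokenizer("Kod jest poezjฤ.")["input_ids"]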
| 90 |
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(R"^(?P<major>\d+)" R"\.(?P<minor>\d+)" R"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    """Dataset version identifier, following MAJOR.MINOR.PATCH semantics."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self):
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple parsed from a version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Join a version tuple back into the 'x.y.z' string form."""
    return ".".join(str(v) for v in version_tuple)
| 139 | 0 |
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    """Scrape the IMDb Top 250 chart into a {title: rating} mapping."""
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
| 368 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1_000, 1_000) for i in range(10)]
    r = randint(-5_000, 5_000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: try every 3-permutation of the array (O(n^3))."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Sort once, then scan with two pointers (O(n^2))."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10_000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10_000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
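# Note on expected behaviour (not measured here): with n = 10 both variants finish
# quickly, but triplet_sum1 enumerates O(n^3) permutations while triplet_sum2 does an
# O(n log n) sort plus an O(n^2) two-pointer scan, so the gap widens rapidly with n.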
| 285 | 0 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
UpperCamelCase__ = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
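# Migration sketch (hedged: the checkpoint name is one example from the Hub):
#
#     from transformers import GLPNImageProcessor
#     image_processor = GLPNImageProcessor.from_pretrained("vinvino02/glpn-kitti")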
| 65 |
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Tuple) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : List[Any] = SqueezeBertModel(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Tuple) -> Dict:
"""simple docstring"""
__lowerCAmelCase : int = SqueezeBertForMaskedLM(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Union[str, Any]) -> int:
"""simple docstring"""
__lowerCAmelCase : str = SqueezeBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : Union[str, Any] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Tuple) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.num_labels
__lowerCAmelCase : Union[str, Any] = SqueezeBertForSequenceClassification(_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[int]) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Dict = self.num_labels
__lowerCAmelCase : Optional[int] = SqueezeBertForTokenClassification(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : List[str] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_squeezebert_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp( self ):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self , config_class=SqueezeBertConfig , dim=37)
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_squeezebert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained( self ):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
    @slow
    def test_inference_classification_head( self ):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
        input_ids = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape , expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output , expected_tensor , atol=1e-4))
| 269 | 0 |
'''simple docstring'''
import math
class Graph:
    def __init__( self , n=0 ):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge( self , u , v , w ):
        self.dp[u][v] = w

    def floyd_warshall( self ):
        for k in range(0 , self.n ):
            for i in range(0 , self.n ):
                for j in range(0 , self.n ):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )

    def show_min( self , u , v ):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
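# Note: floyd_warshall() relaxes every (i, j) pair through each intermediate node k,
# which is O(n^3) time and O(n^2) space; afterwards show_min(u, v) is a constant-time
# lookup of the shortest-path weight from u to v.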
| 356 |
'''simple docstring'''
from math import log2
def get_index_of_rightmost_set_bit( number: int ) -> int:
    if not isinstance(number , int ):
        raise TypeError("""Input value must be an 'int' type""" )
    if number < 0:
        raise ValueError("""Input value must be a positive integer""" )
    return 0 if (number == 0) else int(log2(number & -number ) )
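# Worked example: for 36 (0b100100), number & -number isolates the lowest set bit (4),
# and log2(4) == 2 is the 0-based index of that bit.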
if __name__ == "__main__":
import doctest
doctest.testmod()
| 338 | 0 |
"""simple docstring"""
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class WavaVecaPhonemeCTCTokenizerTest(TokenizerTesterMixin , unittest.TestCase):
'''simple docstring'''
    tokenizer_class = WavaVecaPhonemeCTCTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        super().setUp()
        vocab = (
"""<s> <pad> </s> <unk> n s t ษ l a i k d m ษ ษพ e ษช p o ษ z รฐ f j v b ษน ส ส iห r w ส u ษก รฆ aษช ส h ษ ษห """
"""ล ษ eษช ฮฒ uห y ษฬ oส แตป eห ฮธ aส ts oห ษฬ ษฃ ษ ษ dส ษl x ษห รง ส tส ษห ษหษน ษฬ ส ษหษน ส aห ษ ล รธ oหษน ษฒ yห """
"""ส iษ i5 s. tษ ?? nสฒ ษห ลฬ ษญ ษรธ ส tสฒ ษจ ษษน ts. rสฒ ษชษน ษญสฒ i.5 ษษช q sสฒ u5 สษน iษ a5 iษ5 รธห ส ja ษษ th ษ5 """
"""oษช dสฒ ษ5 tษh ts.h mสฒ ษฏ dส vสฒ eฬ tสสฒ ei5 o5 onษก5 ษu5 iษ5 ai5 aษชษ kh ษ1 ส i2 ส ฤง t[ aษชษ สฒ ju ษ2 u2 oษ """
"""pห iษษ ou5 y5 uษ tห uo5 d[ uoษ tsh ษษ ษต iฬช5 uei5 ษ aษ ษษจ i.ษ eส o2 ษฬ รค pสฒ kสฒ nฬฉ ษ ph ษu2 uษจ ษษช ษซ ษฌ """
"""yษ bสฒ ษ2 sฬช aiษ ฯ ษฬสฬ 1 ษ4 yรฆษ a2 ษจห tฬช iouษ uฬ onษกษ aษจ iษ2 ษษจ ษuษ oฬ ei2 iou2 c kห y2 ษ oe dหค yษษ """
"""ษส S ษกสฒ onษก2 u\" eiษ ส ษฏแต iou5 dZ rฬฬ i.2 tS s^ ส yษ5 iษษ uษ5 pf ษจu iษ2 ou2 ษr2 fสฒ ai2 rฬ uษษ ษณ ษษจ """
"""ua5 uษช ษฝ bห yu5 uo2 yษ5 lฬฉ ษป ษrษ ส iฬช2 ouษ uaษ a. a.ห yรฆ5 dห rฬฉ ee ษชu ษr5 iฬช ษ รฆi u: i.ห t^ o1 ษช^ """
"""ai ueiษ รฆห ษษช eษ i. ษด ie ua2 ษ1 o4 tสห o: ษ: u1 N iฬช1 au yรฆ2 u. qห yษษ y: kสฐ tสสฐ iส sx oฬ uo tสฐ """
"""uai5 bสฐ u.ห uษ2 สษ d^ sฬชห yiษ dสฐ r. oe: i1 ษห yu2 nสฒสฒ iฬช4 uei2 tsสฒ ษธ iฬ ษ4 tฬชห eษ u4 e: tsห สสฐ ษกสฐ """
"""ษฏษฏ dสสฒ สสฒ X ษตห uaiษ tษสฒ aฬ t^ห eฬห yษ2 cห i.1 ษส dหคdหค dสห i4 ษกห yi ษสฒ ษสฐ pสฐ dสสฒ yuษ ua1 ua4 รฆiห ษษ """
"""ui iou1 สห a1 iou4 cสฐ iษ1 yษ2 ษสฐ eฬ สสฒ รครค ษr4 iหห ษชห iษ1 ษr1 ลห รธi ษชuห cสฐcสฐ ษห1 iห1 ลฉ kสฐห oฬoฬ xสฒ """
"""ou1 iษ4 eฬeฬ y1 dzห dสฒสฒ dสฐห ษฏแตษฏแต lห uo1 i.4 i: yษ5สฒ a4"""
).split(""" """ )
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.special_tokens_map = {"""pad_token""": """<pad>""", """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
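    # setUp materializes the full phoneme vocabulary in a temp dir so that
    # get_tokenizer() below exercises real from_pretrained-style file loading.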
    def get_clean_sequence( self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=False )) for i in range(len(tokenizer ) )]
        toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , do_phonemize=False ) , toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
                + """ """
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = """ """ + output_txt
        output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
        return output_txt, output_ids
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
# check adding a single token
tokenizer.add_tokens("""xxx""" )
SCREAMING_SNAKE_CASE__ : Any = tokenizer("""m xxx ษช""" , do_phonemize=_a ).input_ids
self.assertEqual(_a , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(["""aaa""", """bbb""", """ccc"""] )
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer("""m aaa ษช ccc""" , do_phonemize=_a ).input_ids
self.assertEqual(_a , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer("""maษช c""" , do_phonemize=_a ).input_ids
self.assertEqual(_a , [3, 200] ) # mai should be <unk> (=3)
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
SCREAMING_SNAKE_CASE__ : Dict = """Hello how are you"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.phonemize(_a , phonemizer_lang="""en-us""" )
self.assertEqual(_a , """h ษ l oส h aส ษหษน j uห""" )
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """Hello how are you"""
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.phonemize(_a , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(_a ).input_ids , tokenizer(_a , do_phonemize=_a ).input_ids )
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
SCREAMING_SNAKE_CASE__ : Any = """Hello how are you"""
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.phonemize(_a , phonemizer_lang="""en-us""" )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.decode(tokenizer(_a ).input_ids )
self.assertEqual(_a , _a )
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.decode(sample_ids[0] )
SCREAMING_SNAKE_CASE__ : int = tokenizer.batch_decode(_a )
self.assertEqual(_a , batch_tokens[0] )
self.assertEqual(_a , ["""k s ษพ ษพ l ษญสฒ""", """j รฐ s j รฐ s oหษน"""] )
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = """Hello how are you"""
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.phonemize(_a , phonemizer_lang="""en-us""" )
self.assertEqual(_a , """h ษ l oส | h aส | ษหษน | j uห |""" )
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = """Hello how are you"""
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.phonemize(_a , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(_a ).input_ids , tokenizer(_a , do_phonemize=_a ).input_ids )
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
SCREAMING_SNAKE_CASE__ : Any = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.decode(sample_ids[0] )
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.batch_decode(_a )
self.assertEqual(_a , batch_tokens[0] )
self.assertEqual(_a , ["""k s ษพ ษพ l ษญสฒ""", """j รฐ s j รฐ s oหษน"""] )
# decode with no word_del_token filter
SCREAMING_SNAKE_CASE__ : Any = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.batch_decode(_a , filter_word_delimiter_token=_a )
self.assertEqual(_a , batch_tokens[0] )
self.assertEqual(_a , ["""k s ษพ | ษพ l | ษญสฒ""", """| j รฐ | s j รฐ s oหษน"""] )
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
SCREAMING_SNAKE_CASE__ : Any = """Hello how are you"""
SCREAMING_SNAKE_CASE__ : Any = tokenizer.phonemize(_a , phonemizer_lang="""en-us""" )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.decode(tokenizer(_a ).input_ids , filter_word_delimiter_token=_a )
self.assertEqual(_a , _a )
def _a ( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
SCREAMING_SNAKE_CASE__ : Tuple = """Hello how are you"""
SCREAMING_SNAKE_CASE__ : str = tokenizer.phonemize(_a , phonemizer_lang="""en-us""" )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.decode(tokenizer(_a ).input_ids , filter_word_delimiter_token=_a )
self.assertEqual(""" """.join([p.strip() for p in phonemes.split(""" |""" )] ).strip() , _a )
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token=_a )
SCREAMING_SNAKE_CASE__ : Any = """Hello how are you"""
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer(_a , phonemizer_lang="""en-us""" ).input_ids
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer(_a , phonemizer_lang="""fr-fr""" ).input_ids
self.assertNotEqual(_a , _a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.decode(_a )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.decode(_a )
self.assertEqual(_a , """h ษ l oส h aส ษหษน j uห""" )
self.assertEqual(_a , """ษ l o h aส a ส j u""" )
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
SCREAMING_SNAKE_CASE__ : List[str] = """Hello how Are you"""
SCREAMING_SNAKE_CASE__ : str = """hello how are you"""
SCREAMING_SNAKE_CASE__ : Any = tokenizer(_a ).input_ids
SCREAMING_SNAKE_CASE__ : Any = tokenizer(_a ).input_ids
self.assertEqual(_a , _a )
def _a ( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
tokenizer.add_tokens(["""!""", """?"""] )
tokenizer.add_special_tokens({"""cls_token""": """$$$"""} )
# fmt: off
SCREAMING_SNAKE_CASE__ : List[str] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
SCREAMING_SNAKE_CASE__ : int = tokenizer.batch_decode(_a )
self.assertEqual(_a , ["""k s ษพ ษพ l ษญสฒ!?!? $$$""", """j รฐ s j รฐ s oหษน $$$"""] )
    @staticmethod
    def get_from_offsets( offsets , key ):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_tokenizer(word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
# ksssษพษพ|ษพษพ<pad>ษพษพ|<pad>ษพlll|ษญสฒ -> k s ษพ ษพ | ษพ l | ษญสฒ"
SCREAMING_SNAKE_CASE__ : Tuple = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.decode(_a , output_char_offsets=_a , filter_word_delimiter_token=_a )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""char_offsets""" in outputs )
self.assertTrue(isinstance(_a , _a ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) , ["""k""", """s""", """ษพ""", """ษพ""", """|""", """ษพ""", """l""", """|""", """ษญสฒ"""] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """start_offset""" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """end_offset""" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.get_tokenizer(word_delimiter_token="""|""" )
def check_list_tuples_equal(_a , _a ):
self.assertTrue(isinstance(_a , _a ) )
self.assertTrue(isinstance(outputs_list[0] , _a ) )
# transform list to ModelOutput
SCREAMING_SNAKE_CASE__ : List[Any] = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch["""text"""] , outputs_batch_a["""text"""] )
def recursive_check(_a , _a ):
if isinstance(_a , _a ):
[recursive_check(_a , _a ) for la, la in zip(_a , _a )]
self.assertEqual(_a , _a )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["""char_offsets"""] , outputs_batch_a["""char_offsets"""] )
# fmt: off
SCREAMING_SNAKE_CASE__ : List[Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
SCREAMING_SNAKE_CASE__ : int = tokenizer.batch_decode(_a , output_char_offsets=_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = [tokenizer.decode(_a , output_char_offsets=_a ) for ids in sample_ids]
check_list_tuples_equal(_a , _a )
@unittest.skip("""Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes""" )
def _a ( self ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""Wav2Vec2PhonemeTokenizer always puts spaces between phonemes""" )
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency""" )
def _a ( self ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("""Wav2Vec2PhonemeModel has no max model length => no testing""" )
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
pass
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : Any = len(_a )
self.assertNotEqual(_a , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
SCREAMING_SNAKE_CASE__ : Tuple = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.add_tokens(_a )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : str = len(_a )
self.assertNotEqual(_a , 0 )
self.assertEqual(_a , _a )
self.assertEqual(_a , len(_a ) )
self.assertEqual(_a , all_size + len(_a ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=_a )
self.assertGreaterEqual(len(_a ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
SCREAMING_SNAKE_CASE__ : Any = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.add_special_tokens(_a )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.vocab_size
SCREAMING_SNAKE_CASE__ : List[Any] = len(_a )
self.assertNotEqual(_a , 0 )
self.assertEqual(_a , _a )
self.assertEqual(_a , len(_a ) )
self.assertEqual(_a , all_size_a + len(_a ) )
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=_a )
self.assertGreaterEqual(len(_a ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def _a ( self ) -> str:
"""simple docstring"""
pass
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def _a ( self ) -> Tuple:
"""simple docstring"""
pass
def _a ( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_tokenizers(fast=_a , do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE__ : Any = ["""รฐ""", """ษช""", """s""", """ษช""", """z""", """ษ""", """t""", """ษ""", """k""", """s""", """t"""]
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.convert_tokens_to_string(_a )
self.assertIsInstance(output["""text"""] , _a )
| 132 |
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def chunker( seq , size ) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq )
    while True:
        chunk = tuple(itertools.islice(it , size ) )
        if not chunk:
            return
        yield chunk
def prepare_input( dirty ) -> str:
    dirty = """""".join([c.upper() for c in dirty if c in string.ascii_letters] )
    clean = """"""
    if len(dirty ) < 2:
        return dirty
    for i in range(len(dirty ) - 1 ):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean ) & 1:
        clean += "X"
    return clean
def generate_table( key ) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = """ABCDEFGHIKLMNOPQRSTUVWXYZ"""
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char )
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char )
    return table
def encode( plaintext , key ) -> str:
    table = generate_table(key )
    plaintext = prepare_input(plaintext )
    ciphertext = """"""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(plaintext , 2 ):
        rowa, cola = divmod(table.index(chara ) , 5 )
        rowb, colb = divmod(table.index(charb ) , 5 )
        if rowa == rowb:
            ciphertext += table[rowa * 5 + (cola + 1) % 5]
            ciphertext += table[rowb * 5 + (colb + 1) % 5]
        elif cola == colb:
            ciphertext += table[((rowa + 1) % 5) * 5 + cola]
            ciphertext += table[((rowb + 1) % 5) * 5 + colb]
        else:  # rectangle
            ciphertext += table[rowa * 5 + colb]
            ciphertext += table[rowb * 5 + cola]
    return ciphertext
def decode( ciphertext , key ) -> str:
    table = generate_table(key )
    plaintext = """"""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(ciphertext , 2 ):
        rowa, cola = divmod(table.index(chara ) , 5 )
        rowb, colb = divmod(table.index(charb ) , 5 )
        if rowa == rowb:
            plaintext += table[rowa * 5 + (cola - 1) % 5]
            plaintext += table[rowb * 5 + (colb - 1) % 5]
        elif cola == colb:
            plaintext += table[((rowa - 1) % 5) * 5 + cola]
            plaintext += table[((rowb - 1) % 5) * 5 + colb]
        else:  # rectangle
            plaintext += table[rowa * 5 + colb]
            plaintext += table[rowb * 5 + cola]
    return plaintext
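# Minimal usage sketch (added for illustration; the message and key are assumed values,
# not part of the original module). Note decode() returns the *prepared* plaintext:
# uppercased, doubled letters split by "X", and padded to even length.
if __name__ == "__main__":
    message = "Hide the gold in the tree stump"
    key = "playfair example"
    encrypted = encode(message , key )
    print(f"Encrypted: {encrypted}")
    print(f"Decrypted: {decode(encrypted , key )}")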
| 132 | 1 |
'''simple docstring'''
import os
def solution() -> str:
    file_path = os.path.join(os.path.dirname(__file__ ) , 'num.txt' )
    with open(file_path ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:10]
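# num.txt is expected to contain one large integer per line (Project Euler problem 13
# ships one hundred 50-digit numbers); only the first ten digits of the sum are kept.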
if __name__ == "__main__":
print(solution())
| 359 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance( x: float , y: float , max_step: int ) -> float:
    a = x
    b = y
    for step in range(max_step ):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb( distance: float ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (2_55, 2_55, 2_55)
def get_color_coded_rgb( distance: float ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )
def get_image( image_width: int = 8_00 , image_height: int = 6_00 , figure_center_x: float = -0.6 , figure_center_y: float = 0 , figure_width: float = 3.2 , max_step: int = 50 , use_distance_color_coding: bool = True , ) -> Image.Image:
    img = Image.new('RGB' , (image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
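# Performance note: get_image() calls get_distance once per pixel, so the default
# 800x600 render performs up to 800 * 600 * 50 = 24 million escape-time iterations.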
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 104 | 0 |
'''simple docstring'''
def solution():
return [
a * b * (1000 - a - b)
for a in range(1 , 999 )
        for b in range(a , 999 )
if (a * a + b * b == (1000 - a - b) ** 2)
][0]
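# Brute force over the special Pythagorean triplet with a + b + c == 1000: c is
# eliminated as 1000 - a - b, so only the (a, b) pairs need to be checked.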
if __name__ == "__main__":
print(f'''{solution() = }''')
| 139 |
"""simple docstring"""
def solution( limit: int = 1_00_00_00 ) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1 , limit ):
        for n in range(first_term , limit , first_term ):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x,y,z are positive integers
                    frequency[n] += 1  # so z>0 and a>d ,also 4d<a
    count = sum(1 for x in frequency[1:limit] if x == 10 )
    return count
if __name__ == "__main__":
print(F"{solution() = }")
| 288 | 0 |
'''simple docstring'''
import math
def jump_search( arr , x ):
    n = len(arr )
    step = int(math.floor(math.sqrt(n ) ) )
    prev = 0
    while arr[min(step , n ) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n ) ) )
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step , n ):
            return -1
    if arr[prev] == x:
        return prev
    return -1
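# Jump search assumes a sorted array. With block size sqrt(n) it makes O(sqrt(n))
# jumps plus at most sqrt(n) linear steps inside a single block, so O(sqrt(n))
# comparisons overall, i.e. between linear and binary search.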
if __name__ == "__main__":
lowerCAmelCase: Optional[Any] = input('Enter numbers separated by a comma:\n').strip()
lowerCAmelCase: int = [int(item) for item in user_input.split(',')]
lowerCAmelCase: Optional[Any] = int(input('Enter the number to be searched:\n'))
lowerCAmelCase: Optional[int] = jump_search(arr, x)
if res == -1:
print('Number not found!')
else:
print(F"Number {x} is at index {res}")
| 96 |
'''simple docstring'''
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print( name , val , spaces=0 ):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = '.' * max(0 , spaces - 2 ) + '# {:' + str(50 - spaces ) + 's}'
        msg = fmt.format(name )
    # Print and recurse (if needed).
    if isinstance(val , dict ):
        if msg is not None:
            print(msg )
        for k in val.keys():
            recursive_print(k , val[k] , spaces + 2 )
    elif isinstance(val , torch.Tensor ):
        print(msg , ':' , val.size() )
    else:
        print(msg , ':' , val )
def fix_query_key_value_ordering( param , checkpoint_version , num_splits , num_heads , hidden_size ):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape )
        param = param.transpose(0 , 2 )
        param = param.transpose(1 , 2 ).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape )
        param = param.transpose(0 , 1 ).contiguous()
    param = param.view(*input_shape )
    return param
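# Illustration (shapes assumed from the comments above): with checkpoint_version >= 2.0
# a fused QKV weight is stored heads-major as (num_heads, num_splits, head_dim, ...);
# the transpose swaps the first two axes so the flattened result is splits-major,
# [num_splits * num_heads * hidden_size, :], which downstream code expects.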
def convert_megatron_checkpoint( args , input_state_dict , config ):
# The converted output model.
a : Optional[Any] = {}
# old versions did not store training args
a : Dict = input_state_dict.get('args' , _A )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
a : Union[str, Any] = ds_args.padded_vocab_size
a : str = ds_args.max_position_embeddings
a : Dict = ds_args.hidden_size
a : Union[str, Any] = ds_args.num_layers
a : Dict = ds_args.num_attention_heads
a : int = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
a : Any = config.n_head
# The hidden_size per head.
a : Tuple = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
a : Any = input_state_dict['checkpoint_version']
else:
a : Any = 0.0
# The model.
a : Optional[int] = input_state_dict['model']
# The language model.
a : Optional[Any] = model['language_model']
# The embeddings.
a : List[str] = lm['embedding']
# The word embeddings.
a : List[Any] = embeddings['word_embeddings']['weight']
# Truncate the embedding table to vocab_size rows.
a : Dict = word_embeddings[: config.vocab_size, :]
a : int = word_embeddings
# The position embeddings.
a : Tuple = embeddings['position_embeddings']['weight']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
a : List[str] = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f"""pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match""" )
# Store the position embeddings.
a : Optional[Any] = pos_embeddings
# The transformer.
a : Union[str, Any] = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder']
# The regex to extract layer names.
a : List[Any] = re.compile(r'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' )
# The simple map of names for "automated" rules.
a : Optional[Any] = {
'attention.dense': '.attn.c_proj.',
'self_attention.dense': '.attn.c_proj.',
'mlp.dense_h_to_4h': '.mlp.c_fc.',
'mlp.dense_4h_to_h': '.mlp.c_proj.',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
a : Tuple = layer_re.match(_A )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
a : Union[str, Any] = int(m.group(1 ) )
# The name of the operation.
a : Optional[int] = m.group(2 )
# Is it a weight or a bias?
a : Optional[int] = m.group(3 )
# The name of the layer.
a : Any = f"""transformer.h.{layer_idx}"""
# For layernorm(s), simply store the layer norm.
if op_name.endswith('layernorm' ):
a : str = 'ln_1' if op_name.startswith('input' ) else 'ln_2'
a : Tuple = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
a : Dict = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , _A , _A )
a : Optional[Any] = causal_mask
# Insert a "dummy" tensor for masked_bias.
a : List[Any] = torch.tensor(-1E4 , dtype=torch.floataa )
a : List[str] = masked_bias
a : Union[str, Any] = fix_query_key_value_ordering(_A , _A , 3 , _A , _A )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
a : int = out_val.transpose(0 , 1 ).contiguous()
# Store.
a : int = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
a : str = fix_query_key_value_ordering(_A , _A , 3 , _A , _A )
# Store. No change of shape.
a : List[str] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
a : Tuple = megatron_to_transformers[op_name]
a : List[str] = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
a : Dict = megatron_to_transformers[op_name]
a : Optional[Any] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
a : str = transformer['final_layernorm.weight']
a : List[str] = transformer['final_layernorm.bias']
# For LM head, transformers' wants the matrix to weight embeddings.
a : Optional[int] = word_embeddings
# It should be done!
return output_state_dict
def main():
# Create the argument parser.
a : Dict = argparse.ArgumentParser()
parser.add_argument('--print-checkpoint-structure' , action='store_true' )
parser.add_argument(
'path_to_checkpoint' , type=_A , help='Path to the checkpoint file (.zip archive or direct .pt file)' , )
parser.add_argument(
'--config_file' , default='' , type=_A , help='An optional config json file describing the pre-trained model.' , )
a : Union[str, Any] = parser.parse_args()
# Extract the basename.
a : Optional[Any] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f"""Extracting PyTorch state dictionary from {args.path_to_checkpoint}""" )
if args.path_to_checkpoint.endswith('.zip' ):
with zipfile.ZipFile(args.path_to_checkpoint , 'r' ) as checkpoint:
with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict:
a : Union[str, Any] = torch.load(_A , map_location='cpu' )
else:
a : Any = torch.load(args.path_to_checkpoint , map_location='cpu' )
a : List[Any] = input_state_dict.get('args' , _A )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
a : int = 'gelu_fast'
elif ds_args.openai_gelu:
a : Dict = 'gelu_new'
else:
a : Any = 'gelu'
else:
# in the very early days this used to be "gelu_new"
a : Any = 'gelu_new'
# Spell out all parameters in case the defaults change.
a : Tuple = GPTaConfig(
vocab_size=5_0257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=_A , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type='cls_index' , summary_use_proj=_A , summary_activation=_A , summary_proj_to_labels=_A , summary_first_dropout=0.1 , scale_attn_weights=_A , use_cache=_A , bos_token_id=5_0256 , eos_token_id=5_0256 , )
else:
a : str = GPTaConfig.from_json_file(args.config_file )
a : Any = ['GPT2LMHeadModel']
# Convert.
print('Converting' )
a : Union[str, Any] = convert_megatron_checkpoint(_A , _A , _A )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(_A , _A )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
a : Union[str, Any] = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
a : Tuple = 'gpt2'
elif tokenizer_type == "PretrainedFromHF":
a : List[str] = ds_args.tokenizer_name_or_path
else:
raise ValueError(f"""Unrecognized tokenizer_type {tokenizer_type}""" )
else:
a : Optional[Any] = 'gpt2'
a : Tuple = AutoTokenizer.from_pretrained(_A )
a : str = type(_A ).__name__
a : List[str] = tokenizer_class
# Store the config to file.
print('Saving config' )
config.save_pretrained(_A )
# Save tokenizer based on args
print(f"""Adding {tokenizer_class} tokenizer files""" )
tokenizer.save_pretrained(_A )
# Store the state_dict to file.
a : Optional[int] = os.path.join(_A , 'pytorch_model.bin' )
print(f"""Saving checkpoint to \"{output_checkpoint_file}\"""" )
torch.save(_A , _A )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 96 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def get_config( model_name ):
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = '''std_conv''' if '''bit''' in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer , num_labels=1000 , id2label=id2label , label2id=label2id , )
    return config
def rename_key( name ):
    if "stem.conv" in name:
        name = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''layers''' )
    if "head.fc" in name:
        name = name.replace('''head.fc''' , '''classifier.1''' )
    if name.startswith('''norm''' ):
        name = '''bit.''' + name
    if "bit" not in name and "classifier" not in name:
        name = '''bit.encoder.''' + name
    return name
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_bit_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub=False ):
    config = get_config(model_name )
    # load original model from timm
    timm_model = create_model(model_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val.squeeze() if '''head''' in key else val
    # load HuggingFace model
    model = BitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors='''pt''' ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
    print('''Logits:''' , logits[0, :3] )
    print('''Predicted class:''' , model.config.id2label[logits.argmax(-1 ).item()] )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f'''Pushing model {model_name} and processor to the hub''' )
        model.push_to_hub(f'''ybelkada/{model_name}''' )
        processor.push_to_hub(f'''ybelkada/{model_name}''' )
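# The two torch.allclose checks above are the conversion's safety net: preprocessing
# must match timm exactly, and the converted logits must agree within atol=1e-3 before
# anything is saved or pushed to the hub.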
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
lowercase_ = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 45 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory( args: Namespace ):
    return ConvertCommand(
        args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
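# Registered below via train_parser.set_defaults(func=convert_command_factory):
# argparse hands the parsed Namespace to this factory, which builds a runnable
# ConvertCommand.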
IMPORT_ERROR_MESSAGE = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class ConvertCommand( BaseTransformersCLICommand ):
'''simple docstring'''
@staticmethod
    def register_subcommand( parser: ArgumentParser ):
        train_parser = parser.add_parser(
            '''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
        train_parser.add_argument('''--model_type''' , type=str , required=True , help='''Model\'s type.''' )
        train_parser.add_argument(
            '''--tf_checkpoint''' , type=str , required=True , help='''TensorFlow checkpoint path or folder.''' )
        train_parser.add_argument(
            '''--pytorch_dump_output''' , type=str , required=True , help='''Path to the PyTorch saved model output.''' )
        train_parser.add_argument('''--config''' , type=str , default='''''' , help='''Configuration file path or folder.''' )
        train_parser.add_argument(
            '''--finetuning_task_name''' , type=str , default=None , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self , model_type: str , tf_checkpoint: str , pytorch_dump_output: str , config: str , finetuning_task_name: str , *args , ):
        self._logger = logging.get_logger('''transformers-cli/converting''' )
        self._logger.info(f'''Loading model {model_type}''' )
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run( self ):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
if "ckpt" in self._tf_checkpoint.lower():
__a = self._tf_checkpoint
__a = ''''''
else:
__a = self._tf_checkpoint
__a = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
_a , self._config , self._pytorch_dump_output , _a )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' )
| 45 | 1 |
'''simple docstring'''
import numpy
# List of input, output pairs
__snake_case : List[str] = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
__snake_case : List[str] = (((515, 22, 13), 555), ((61, 35, 49), 150))
__snake_case : int = [2, 4, 1, 5]
__snake_case : str = len(train_data)
__snake_case : Union[str, Any] = 0.009
def _error( example_no , data_set="train" ):
    return calculate_hypothesis_value(example_no , data_set ) - output(
        example_no , data_set )
def _hypothesis_value( data_input_tuple ):
    hyp_val = 0
    for i in range(len(parameter_vector ) - 1 ):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output( example_no , data_set ):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value( example_no , data_set ):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0] )
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0] )
    return None
def summation_of_cost_derivative( index , end=m ):
    summation_value = 0
    for i in range(end ):
        if index == -1:
            summation_value += _error(i )
        else:
            summation_value += _error(i ) * train_data[i][0][index]
    return summation_value
def get_cost_derivative( index ):
    cost_derivative_value = summation_of_cost_derivative(index , m ) / m
    return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.0_0_0_0_0_2
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0 , len(parameter_vector ) ):
            cost_derivative = get_cost_derivative(i - 1 )
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector , temp_parameter_vector , atol=absolute_error_limit , rtol=relative_error_limit , ):
            break
        parameter_vector = temp_parameter_vector
    print(('''Number of iterations:''', j) )
def test_gradient_descent():
    for i in range(len(test_data ) ):
        print(('''Actual output value:''', output(i , '''test''' )) )
        print(('''Hypothesis output:''', calculate_hypothesis_value(i , '''test''' )) )
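# run_gradient_descent() is batch gradient descent: every update of parameter_vector
# uses the full training set (via summation_of_cost_derivative), and iteration stops
# once consecutive parameter vectors agree within the tolerances passed to
# numpy.allclose.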
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
| 371 |
'''simple docstring'''
from statistics import mean, stdev
def normalization( data: list , ndigits: int = 3 ) -> list:
    x_min = min(data )
    x_max = max(data )
    # normalize data
    return [round((x - x_min) / (x_max - x_min) , ndigits ) for x in data]


def standardization( data: list , ndigits: int = 3 ) -> list:
    mu = mean(data )
    sigma = stdev(data )
    # standardize data
    return [round((x - mu) / (sigma) , ndigits ) for x in data]
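# Worked example (illustrative): normalization([2, 4, 6]) -> [0.0, 0.5, 1.0];
# standardization([2, 4, 6]) centers on the mean 4 and divides by the sample
# standard deviation 2, giving [-1.0, 0.0, 1.0].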
| 18 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : Tuple = logging.get_logger(__name__)
a_ : Optional[int] = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class DataaVecVisionConfig( PretrainedConfig ):
    model_type = '''data2vec-vision'''
    def __init__( self , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , image_size=2_24 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=2_55 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class a ( _a ):
_lowerCAmelCase = version.parse("""1.11""" )
@property
def __UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __UpperCAmelCase ( self ) -> float:
return 1e-4
| 168 |
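The row above mirrors transformers' Data2VecVisionConfig; a brief sketch of how such a config is typically instantiated (the checkpoint-free constructor call is the assumption here):

from transformers import Data2VecVisionConfig

config = Data2VecVisionConfig(image_size=224, patch_size=16, num_channels=3)
print(config.model_type)   # "data2vec-vision"
print(config.hidden_size)  # 768 (the default, matching the __init__ above)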
from __future__ import annotations
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> tuple[float, list[float]]:
"""simple docstring"""
snake_case_ : Dict = list(range(len(_UpperCamelCase ) ) )
snake_case_ : Dict = [v / w for v, w in zip(_UpperCamelCase , _UpperCamelCase )]
    index.sort(key=lambda _UpperCamelCase : ratio[_UpperCamelCase] , reverse=_UpperCamelCase )
snake_case_ : float = 0
snake_case_ : list[float] = [0] * len(_UpperCamelCase )
for i in index:
if weight[i] <= capacity:
snake_case_ : Dict = 1
max_value += value[i]
capacity -= weight[i]
else:
snake_case_ : Union[str, Any] = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
| 279 | 0 |
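A readable, runnable restatement of the greedy fractional-knapsack routine above, plus a call; the item values and weights are illustrative assumptions.

def fractional_knapsack(value, weight, capacity):
    # Sort item indices by value density, take whole items greedily,
    # then a fraction of the first item that no longer fits.
    index = sorted(range(len(value)), key=lambda i: value[i] / weight[i], reverse=True)
    max_value, fractions = 0.0, [0.0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1.0
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions

print(fractional_knapsack([1, 3, 5, 7, 9], [0.9, 0.7, 0.5, 0.3, 0.1], 5))
# (25.0, [1.0, 1.0, 1.0, 1.0, 1.0]) -- everything fits, total weight is 2.5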
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_ ( metaclass=_a ):
"""simple docstring"""
lowercase = ["keras_nlp"]
def __init__( self : Dict , *snake_case_ : Union[str, Any] , **snake_case_ : str ):
requires_backends(self , ["""keras_nlp"""] )
| 43 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
__a = logging.get_logger(__name__)
__a = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__a = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
__a = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
__a = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
__a = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
__a = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
__a = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
__a = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
__a = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
__a = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
lowercase = VOCAB_FILES_NAMES
lowercase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
lowercase = VOCAB_FILES_NAMES
lowercase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__a = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
__a = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
__a = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(_a )
class UpperCAmelCase_ :
"""simple docstring"""
def __call__( self : str , snake_case_ : Optional[Any] , snake_case_ : Optional[str] = None , snake_case_ : Optional[str] = None , snake_case_ : Union[bool, str] = False , snake_case_ : Union[bool, str] = False , snake_case_ : Optional[int] = None , snake_case_ : Optional[Union[str, TensorType]] = None , snake_case_ : Optional[bool] = None , **snake_case_ : Union[str, Any] , ):
if titles is None and texts is None:
return super().__call__(
snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , return_tensors=snake_case_ , return_attention_mask=snake_case_ , **snake_case_ , )
elif titles is None or texts is None:
snake_case__ : int = titles if texts is None else texts
return super().__call__(
snake_case_ , snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , return_tensors=snake_case_ , return_attention_mask=snake_case_ , **snake_case_ , )
snake_case__ : List[str] = titles if not isinstance(snake_case_ , snake_case_ ) else [titles]
snake_case__ : Union[str, Any] = texts if not isinstance(snake_case_ , snake_case_ ) else [texts]
snake_case__ : Dict = len(snake_case_ )
snake_case__ : Union[str, Any] = questions if not isinstance(snake_case_ , snake_case_ ) else [questions] * n_passages
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError(
f"There should be as many titles than texts but got {len(snake_case_ )} titles and {len(snake_case_ )} texts." )
snake_case__ : int = super().__call__(snake_case_ , snake_case_ , padding=snake_case_ , truncation=snake_case_ )["""input_ids"""]
snake_case__ : Any = super().__call__(snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ )["""input_ids"""]
snake_case__ : Dict = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(snake_case_ , snake_case_ )
]
}
if return_attention_mask is not False:
snake_case__ : List[Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
snake_case__ : Union[str, Any] = attention_mask
return self.pad(snake_case_ , padding=snake_case_ , max_length=snake_case_ , return_tensors=snake_case_ )
def lowerCamelCase ( self : Optional[int] , snake_case_ : BatchEncoding , snake_case_ : DPRReaderOutput , snake_case_ : int = 16 , snake_case_ : int = 64 , snake_case_ : int = 4 , ):
snake_case__ : Optional[int] = reader_input["""input_ids"""]
snake_case__ , snake_case__ , snake_case__ : List[str] = reader_output[:3]
snake_case__ : Union[str, Any] = len(snake_case_ )
snake_case__ : Tuple = sorted(range(snake_case_ ) , reverse=snake_case_ , key=relevance_logits.__getitem__ )
snake_case__ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
snake_case__ : Union[str, Any] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
snake_case__ : Optional[Any] = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
snake_case__ : int = sequence_ids.index(self.pad_token_id )
else:
snake_case__ : int = len(snake_case_ )
snake_case__ : Optional[int] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=snake_case_ , top_spans=snake_case_ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=snake_case_ , start_index=snake_case_ , end_index=snake_case_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(snake_case_ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowerCamelCase ( self : str , snake_case_ : List[int] , snake_case_ : List[int] , snake_case_ : int , snake_case_ : int , ):
snake_case__ : List[str] = []
for start_index, start_score in enumerate(snake_case_ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        snake_case__ : Any = sorted(snake_case_ , key=lambda snake_case_ : snake_case_[1] , reverse=snake_case_ )
snake_case__ : Optional[Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]" )
snake_case__ : Union[str, Any] = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"Span is too long: {length} > {max_answer_length}" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(snake_case_ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_a )
class UpperCAmelCase_ ( _a , _a ):
"""simple docstring"""
lowercase = VOCAB_FILES_NAMES
lowercase = READER_PRETRAINED_VOCAB_FILES_MAP
lowercase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = READER_PRETRAINED_INIT_CONFIGURATION
lowercase = ["input_ids", "attention_mask"]
| 43 | 1 |
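Assuming the classes above mirror transformers' DPR tokenizers, a typical reader call encodes one question against several (title, text) passages; the checkpoint name is the standard facebook one and the call downloads its vocab.

from transformers import DPRReaderTokenizer

tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
encoded = tokenizer(
    questions="What is love?",
    titles=["Haddaway", "Song"],
    texts=["'What Is Love' is a 1993 song.", "A song is a musical composition."],
    padding=True,
    return_tensors="pt",
)
print(encoded["input_ids"].shape)  # (n_passages, sequence_length)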
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
class _snake_case ( __lowerCamelCase ):
lowerCamelCase__: str = """timm_backbone"""
def __init__( self: Tuple , __lowerCamelCase: Optional[int]=None , __lowerCamelCase: Any=3 , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: List[Any]=None , **__lowerCamelCase: Dict , ) -> Optional[int]:
super().__init__(**__lowerCamelCase )
__UpperCAmelCase : int = backbone
__UpperCAmelCase : int = num_channels
__UpperCAmelCase : Optional[int] = features_only
__UpperCAmelCase : Optional[Any] = use_pretrained_backbone
__UpperCAmelCase : Optional[Any] = True
__UpperCAmelCase : Dict = out_indices if out_indices is not None else (-1,)
| 157 |
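Assuming the class above mirrors transformers' TimmBackboneConfig, a minimal instantiation looks like this; note the hard-coded `True` assignment (use_pretrained flags in the upstream source) and the `(-1,)` default for out_indices in the __init__ above.

from transformers import TimmBackboneConfig

config = TimmBackboneConfig(backbone="resnet50", features_only=True)
print(config.model_type)   # "timm_backbone"
print(config.out_indices)  # (-1,) -- only the last feature map by default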
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCAmelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : str = ["""pixel_values"""]
def __init__( self , snake_case = True , snake_case = None , snake_case = PILImageResampling.BICUBIC , snake_case = True , snake_case = None , snake_case = True , snake_case = 1 / 255 , snake_case = True , snake_case = None , snake_case = None , snake_case = True , **snake_case , ):
super().__init__(**snake_case )
lowercase = size if size is not None else {'shortest_edge': 224}
lowercase = get_size_dict(snake_case , default_to_square=snake_case )
lowercase = crop_size if crop_size is not None else {'height': 224, 'width': 224}
lowercase = get_size_dict(snake_case , default_to_square=snake_case , param_name='crop_size' )
lowercase = do_resize
lowercase = size
lowercase = resample
lowercase = do_center_crop
lowercase = crop_size
lowercase = do_rescale
lowercase = rescale_factor
lowercase = do_normalize
lowercase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowercase = image_std if image_std is not None else OPENAI_CLIP_STD
lowercase = do_convert_rgb
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case = PILImageResampling.BICUBIC , snake_case = None , **snake_case , ):
lowercase = get_size_dict(snake_case , default_to_square=snake_case )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
lowercase = get_resize_output_image_size(snake_case , size=size['shortest_edge'] , default_to_square=snake_case )
return resize(snake_case , size=snake_case , resample=snake_case , data_format=snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case = None , **snake_case , ):
lowercase = get_size_dict(snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(snake_case , size=(size['height'], size['width']) , data_format=snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case = None , **snake_case , ):
return rescale(snake_case , scale=snake_case , data_format=snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case = None , **snake_case , ):
return normalize(snake_case , mean=snake_case , std=snake_case , data_format=snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = ChannelDimension.FIRST , **snake_case , ):
lowercase = do_resize if do_resize is not None else self.do_resize
lowercase = size if size is not None else self.size
lowercase = get_size_dict(snake_case , param_name='size' , default_to_square=snake_case )
lowercase = resample if resample is not None else self.resample
lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase = crop_size if crop_size is not None else self.crop_size
lowercase = get_size_dict(snake_case , param_name='crop_size' , default_to_square=snake_case )
lowercase = do_rescale if do_rescale is not None else self.do_rescale
lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase = do_normalize if do_normalize is not None else self.do_normalize
lowercase = image_mean if image_mean is not None else self.image_mean
lowercase = image_std if image_std is not None else self.image_std
lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase = make_list_of_images(snake_case )
if not valid_images(snake_case ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase = [convert_to_rgb(snake_case ) for image in images]
# All transformations expect numpy arrays.
lowercase = [to_numpy_array(snake_case ) for image in images]
if do_resize:
lowercase = [self.resize(image=snake_case , size=snake_case , resample=snake_case ) for image in images]
if do_center_crop:
lowercase = [self.center_crop(image=snake_case , size=snake_case ) for image in images]
if do_rescale:
lowercase = [self.rescale(image=snake_case , scale=snake_case ) for image in images]
if do_normalize:
lowercase = [self.normalize(image=snake_case , mean=snake_case , std=snake_case ) for image in images]
lowercase = [to_channel_dimension_format(snake_case , snake_case ) for image in images]
lowercase = {'pixel_values': images}
return BatchFeature(data=snake_case , tensor_type=snake_case )
| 195 | 0 |
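A usage sketch of the preprocessing pipeline above, assuming it mirrors transformers' CLIPImageProcessor (resize, center-crop, rescale, normalize, channels-first); the dummy image is an assumption.

import numpy as np
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor(
    size={"shortest_edge": 224}, crop_size={"height": 224, "width": 224}
)
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # dummy HWC image
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)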
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
lowerCAmelCase :int = logging.get_logger(__name__)
class _lowerCamelCase :
'''simple docstring'''
A_ : str
A_ : str = None
@staticmethod
def __lowerCAmelCase ( ) -> int:
raise NotImplementedError
def __lowerCAmelCase ( self : List[str] , _A : Optional[int] , _A : int , _A : str , **_A : Tuple ) -> Optional[Any]:
raise NotImplementedError
def __lowerCAmelCase ( self : Any , _A : str ) -> List[Any]:
raise NotImplementedError
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
if not self.is_available():
raise RuntimeError(
F'You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.' )
@classmethod
def __lowerCAmelCase ( cls : Any ) -> List[str]:
return F'`pip install {cls.pip_package or cls.name}`'
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Optional[Any] = """optuna"""
@staticmethod
def __lowerCAmelCase ( ) -> Any:
return is_optuna_available()
def __lowerCAmelCase ( self : Tuple , _A : Tuple , _A : int , _A : str , **_A : Optional[int] ) -> int:
return run_hp_search_optuna(_A , _A , _A , **_A )
def __lowerCAmelCase ( self : int , _A : Any ) -> str:
return default_hp_space_optuna(_A )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : str = """ray"""
A_ : Optional[int] = """'ray[tune]'"""
@staticmethod
def __lowerCAmelCase ( ) -> Any:
return is_ray_available()
def __lowerCAmelCase ( self : Optional[int] , _A : str , _A : int , _A : str , **_A : Tuple ) -> Any:
return run_hp_search_ray(_A , _A , _A , **_A )
def __lowerCAmelCase ( self : List[str] , _A : List[str] ) -> List[str]:
return default_hp_space_ray(_A )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Optional[Any] = """sigopt"""
@staticmethod
def __lowerCAmelCase ( ) -> Union[str, Any]:
return is_sigopt_available()
def __lowerCAmelCase ( self : Dict , _A : List[str] , _A : int , _A : str , **_A : Optional[Any] ) -> Any:
return run_hp_search_sigopt(_A , _A , _A , **_A )
def __lowerCAmelCase ( self : List[Any] , _A : Optional[Any] ) -> Any:
return default_hp_space_sigopt(_A )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : int = """wandb"""
@staticmethod
def __lowerCAmelCase ( ) -> Any:
return is_wandb_available()
def __lowerCAmelCase ( self : Union[str, Any] , _A : Optional[int] , _A : int , _A : str , **_A : Tuple ) -> Tuple:
return run_hp_search_wandb(_A , _A , _A , **_A )
def __lowerCAmelCase ( self : int , _A : Optional[int] ) -> Union[str, Any]:
return default_hp_space_wandb(_A )
lowerCAmelCase :int = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : List[str] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(lowerCAmelCase ) > 0:
__magic_name__ : Union[str, Any] = available_backends[0].name
if len(lowerCAmelCase ) > 1:
logger.info(
f'{len(lowerCAmelCase )} hyperparameter search backends available. Using {name} as the default.' )
return name
raise RuntimeError(
'No hyperparameter search backend available.\n'
+ '\n'.join(
f' - To install {backend.name} run {backend.pip_install()}'
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 275 |
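The selection helper at the bottom of the row above boils down to "probe which optional packages import, pick the first available"; a stdlib-only sketch of that logic (package names assumed to match the backend names):

import importlib.util

BACKENDS = ("optuna", "ray", "sigopt", "wandb")

def default_hp_search_backend():
    # Returns the first backend whose package is importable.
    for name in BACKENDS:
        if importlib.util.find_spec(name) is not None:
            return name
    raise RuntimeError("No hyperparameter search backend available.")

print(default_hp_search_backend())  # e.g. "optuna", if it is installed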
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
lowerCAmelCase :str = logging.get_logger(__name__)
lowerCAmelCase :str = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''adapter_layer''': '''encoder.layers.*.adapter_layer''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
'''pooling_layer.linear''': '''projector''',
'''pooling_layer.projection''': '''classifier''',
}
lowerCAmelCase :List[str] = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''projector''',
'''classifier''',
]
def lowerCamelCase ( lowerCAmelCase : Tuple ):
"""simple docstring"""
__magic_name__ : Any = {}
with open(lowerCAmelCase , 'r' ) as file:
for line_number, line in enumerate(lowerCAmelCase ):
__magic_name__ : Optional[Any] = line.strip()
if line:
__magic_name__ : Optional[int] = line.split()
__magic_name__ : Any = line_number
__magic_name__ : Union[str, Any] = words[0]
__magic_name__ : Dict = value
return result
def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple ):
"""simple docstring"""
for attribute in key.split('.' ):
__magic_name__ : Optional[Any] = getattr(lowerCAmelCase , lowerCAmelCase )
__magic_name__ : Tuple = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase ):
__magic_name__ : Optional[Any] = PARAM_MAPPING[full_name.split('.' )[-1]]
__magic_name__ : List[Any] = 'param'
if weight_type is not None and weight_type != "param":
__magic_name__ : List[str] = getattr(lowerCAmelCase , lowerCAmelCase ).shape
elif weight_type is not None and weight_type == "param":
__magic_name__ : Tuple = hf_pointer
for attribute in hf_param_name.split('.' ):
__magic_name__ : str = getattr(lowerCAmelCase , lowerCAmelCase )
__magic_name__ : Union[str, Any] = shape_pointer.shape
# let's reduce dimension
__magic_name__ : int = value[0]
else:
__magic_name__ : Optional[Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
__magic_name__ : Optional[Any] = value
elif weight_type == "weight_g":
__magic_name__ : List[str] = value
elif weight_type == "weight_v":
__magic_name__ : Optional[int] = value
elif weight_type == "bias":
__magic_name__ : Optional[Any] = value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
__magic_name__ : Optional[int] = getattr(lowerCAmelCase , lowerCAmelCase )
__magic_name__ : List[str] = value
else:
__magic_name__ : int = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : str , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : str ):
"""simple docstring"""
__magic_name__ : Optional[int] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase ):
__magic_name__ : List[Any] = PARAM_MAPPING[full_name.split('.' )[-1]]
__magic_name__ : Dict = 'param'
if weight_type is not None and weight_type != "param":
__magic_name__ : Union[str, Any] = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__magic_name__ : Optional[Any] = '.'.join([key, hf_param_name] )
else:
__magic_name__ : int = key
__magic_name__ : int = value if 'lm_head' in full_key else value[0]
lowerCAmelCase :int = {
'''W_a''': '''linear_1.weight''',
'''W_b''': '''linear_2.weight''',
'''b_a''': '''linear_1.bias''',
'''b_b''': '''linear_2.bias''',
'''ln_W''': '''norm.weight''',
'''ln_b''': '''norm.bias''',
}
def lowerCamelCase ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple=None , lowerCAmelCase : Tuple=None ):
"""simple docstring"""
__magic_name__ : Dict = False
for key, mapped_key in MAPPING.items():
__magic_name__ : int = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__magic_name__ : Union[str, Any] = True
if "*" in mapped_key:
__magic_name__ : List[Any] = name.split(lowerCAmelCase )[0].split('.' )[-2]
__magic_name__ : List[str] = mapped_key.replace('*' , lowerCAmelCase )
if "weight_g" in name:
__magic_name__ : str = 'weight_g'
elif "weight_v" in name:
__magic_name__ : Optional[int] = 'weight_v'
elif "bias" in name:
__magic_name__ : int = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__magic_name__ : List[str] = 'weight'
else:
__magic_name__ : Any = None
if hf_dict is not None:
rename_dict(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
set_recursively(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return is_used
return is_used
def lowerCamelCase ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict ):
"""simple docstring"""
__magic_name__ : Union[str, Any] = []
__magic_name__ : Any = fairseq_model.state_dict()
__magic_name__ : Optional[int] = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__magic_name__ : Optional[Any] = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
__magic_name__ : Optional[int] = True
else:
__magic_name__ : str = load_wavaveca_layer(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if not is_used:
unused_weights.append(lowerCAmelCase )
logger.warning(f'Unused weights: {unused_weights}' )
def lowerCamelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : Dict , lowerCAmelCase : Any , lowerCAmelCase : Any , lowerCAmelCase : int ):
"""simple docstring"""
__magic_name__ : Any = full_name.split('conv_layers.' )[-1]
__magic_name__ : int = name.split('.' )
__magic_name__ : Any = int(items[0] )
__magic_name__ : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
__magic_name__ : Union[str, Any] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
__magic_name__ : str = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
__magic_name__ : Optional[int] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
__magic_name__ : Dict = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowerCAmelCase )
@torch.no_grad()
def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict=None , lowerCAmelCase : Any=None , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Any=False ):
"""simple docstring"""
if config_path is not None:
__magic_name__ : int = WavaVecaConfig.from_pretrained(lowerCAmelCase )
else:
__magic_name__ : List[str] = WavaVecaConfig()
if is_seq_class:
__magic_name__ : Any = read_txt_into_dict(lowerCAmelCase )
__magic_name__ : Optional[Any] = idalabel
__magic_name__ : Union[str, Any] = WavaVecaForSequenceClassification(lowerCAmelCase )
__magic_name__ : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=lowerCAmelCase , return_attention_mask=lowerCAmelCase , )
feature_extractor.save_pretrained(lowerCAmelCase )
elif is_finetuned:
if dict_path:
__magic_name__ : str = Dictionary.load(lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__magic_name__ : Dict = target_dict.pad_index
__magic_name__ : Union[str, Any] = target_dict.bos_index
__magic_name__ : Union[str, Any] = target_dict.eos_index
__magic_name__ : Union[str, Any] = len(target_dict.symbols )
__magic_name__ : Dict = os.path.join(lowerCAmelCase , 'vocab.json' )
if not os.path.isdir(lowerCAmelCase ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowerCAmelCase ) )
return
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
__magic_name__ : List[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
__magic_name__ : Any = 0
__magic_name__ : Optional[int] = 1
with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(lowerCAmelCase , lowerCAmelCase )
__magic_name__ : List[str] = WavaVecaCTCTokenizer(
lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowerCAmelCase , )
__magic_name__ : Tuple = True if config.feat_extract_norm == 'layer' else False
__magic_name__ : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=lowerCAmelCase , return_attention_mask=lowerCAmelCase , )
__magic_name__ : List[Any] = WavaVecaProcessor(feature_extractor=lowerCAmelCase , tokenizer=lowerCAmelCase )
processor.save_pretrained(lowerCAmelCase )
__magic_name__ : Dict = WavaVecaForCTC(lowerCAmelCase )
else:
__magic_name__ : Tuple = WavaVecaForPreTraining(lowerCAmelCase )
if is_finetuned or is_seq_class:
__magic_name__ , __magic_name__ , __magic_name__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
__magic_name__ : Optional[Any] = argparse.Namespace(task='audio_pretraining' )
__magic_name__ : Dict = fairseq.tasks.setup_task(lowerCAmelCase )
__magic_name__ , __magic_name__ , __magic_name__ : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCAmelCase )
__magic_name__ : Any = model[0].eval()
recursively_load_weights(lowerCAmelCase , lowerCAmelCase , not is_finetuned )
hf_wavavec.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
lowerCAmelCase :str = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
parser.add_argument(
'''--is_seq_class''',
action='''store_true''',
help='''Whether the model to convert is a fine-tuned sequence classification model or not''',
)
lowerCAmelCase :Dict = parser.parse_args()
lowerCAmelCase :Tuple = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 275 | 1 |
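The conversion script above is argparse-driven; a typical invocation (script file name and paths are illustrative placeholders) would look like:

# python convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py \
#     --checkpoint_path ./wav2vec_small.pt \
#     --pytorch_dump_folder_path ./wav2vec2-base \
#     --not_finetuned
#
# Omitting --not_finetuned (and passing --dict_path) converts a fine-tuned
# CTC checkpoint instead; --is_seq_class selects the classification head.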
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> str:
if not all(char in '''01''' for char in bin_string ):
raise ValueError('''Non-binary value was passed to the function''' )
if not bin_string:
raise ValueError('''Empty string was passed to the function''' )
lowerCAmelCase = ''''''
while len(snake_case__ ) % 3 != 0:
lowerCAmelCase = '''0''' + bin_string
lowerCAmelCase = [
bin_string[index : index + 3]
for index in range(len(snake_case__ ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
lowerCAmelCase = 0
for index, val in enumerate(snake_case__ ):
oct_val += int(2 ** (2 - index) * int(snake_case__ ) )
oct_string += str(snake_case__ )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
| 338 |
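A readable, runnable restatement of the binary-to-octal conversion above: left-pad to a multiple of 3 bits, then map each 3-bit group to one octal digit.

def bin_to_octal(bin_string: str) -> str:
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    if any(char not in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    return "".join(
        str(int(bin_string[i : i + 3], 2)) for i in range(0, len(bin_string), 3)
    )

print(bin_to_octal("1111"))       # '17'
print(bin_to_octal("101010101"))  # '525'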
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
lowercase__ : Dict = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
lowercase__ : Optional[int] = [0, 2_5, 5_0]
lowercase__ : Union[str, Any] = [2_5, 5_0, 7_5]
lowercase__ : int = fuzz.membership.trimf(X, abca)
lowercase__ : Tuple = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
lowercase__ : List[str] = np.ones(7_5)
lowercase__ : Any = np.zeros((7_5,))
# 1. Union = max(µA(x), µB(x))
lowercase__ : Union[str, Any] = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
lowercase__ : int = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1 - µA(x))
lowercase__ : Union[str, Any] = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
lowercase__ : Optional[int] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
lowercase__ : Any = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
lowercase__ : str = young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
lowercase__ : Tuple = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = max[0, (µA(x) - µB(x))]
lowercase__ : Tuple = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 338 | 1 |
import math
def lowerCamelCase__ (__lowerCamelCase ):
assert isinstance(__lowerCamelCase, __lowerCamelCase ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
_SCREAMING_SNAKE_CASE : str = range(3, int(math.sqrt(__lowerCamelCase ) + 1 ), 2 )
return not any(not number % i for i in odd_numbers )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase=1, **__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = factor * value
_SCREAMING_SNAKE_CASE : List[Any] = value
while not is_prime(__lowerCamelCase ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1, **__lowerCamelCase )
return value
| 325 |
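Readable sketch of the two helpers above: a trial-division primality test and a walker that scales a value and steps to the nearest prime (direction controlled by a `desc` flag, as in the kwargs check above).

import math

def is_prime(number: int) -> bool:
    assert isinstance(number, int) and number >= 0, "'number' must be an int and positive"
    if 1 < number < 4:  # 2 and 3 are prime
        return True
    if number < 2 or number % 2 == 0:  # negatives, 0, 1 and evens are not
        return False
    return not any(number % i == 0 for i in range(3, int(math.sqrt(number)) + 1, 2))

def next_prime(value: int, factor: int = 1, desc: bool = False) -> int:
    value *= factor
    while not is_prime(value):
        value += -1 if desc else 1
    return value

print(next_prime(14))             # 17
print(next_prime(14, desc=True))  # 13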
import numpy as np
import datasets
UpperCamelCase__ ='\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
UpperCamelCase__ ='\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
UpperCamelCase__ ='\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> int:
# convert to numpy arrays
_SCREAMING_SNAKE_CASE : Dict = np.array(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = np.array(__lowerCamelCase )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("Expected `X` to be a 2D vector" )
if len(reference_distribution.shape ) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
# Get mahalanobis distance for each prediction
_SCREAMING_SNAKE_CASE : Any = X - np.mean(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = np.cov(reference_distribution.T )
try:
_SCREAMING_SNAKE_CASE : Optional[int] = np.linalg.inv(__lowerCamelCase )
except np.linalg.LinAlgError:
_SCREAMING_SNAKE_CASE : List[str] = np.linalg.pinv(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = np.dot(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = np.dot(__lowerCamelCase , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 325 | 1 |
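The metric above in plain numpy, using the textbook per-feature mean (for the docstring example it coincides with the scalar mean used upstream); reproduces the documented result.

import numpy as np

def mahalanobis(X, reference):
    # Squared Mahalanobis distance of each row of X from the empirical
    # distribution of `reference`, falling back to the pseudo-inverse
    # when the covariance matrix is singular.
    X = np.atleast_2d(np.asarray(X, dtype=float))
    reference = np.asarray(reference, dtype=float)
    delta = X - reference.mean(axis=0)
    cov = np.cov(reference.T)
    try:
        cov_inv = np.linalg.inv(cov)
    except np.linalg.LinAlgError:
        cov_inv = np.linalg.pinv(cov)
    return np.einsum("ij,jk,ik->i", delta, cov_inv, delta)

print(mahalanobis(X=[[0, 1]], reference=[[0, 1], [1, 0]]))  # [0.5]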
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = ['''image_processor''', '''tokenizer''']
UpperCAmelCase__ = '''AutoImageProcessor'''
UpperCAmelCase__ = '''AutoTokenizer'''
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Union[str, Any]=None , **UpperCAmelCase__ : Optional[int]) ->Dict:
'''simple docstring'''
A__ = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , UpperCAmelCase__ , )
A__ = kwargs.pop('''feature_extractor''')
A__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''')
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''')
super().__init__(UpperCAmelCase__ , UpperCAmelCase__)
A__ = self.image_processor
A__ = False
def __call__( self : str , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Optional[Any]) ->Any:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*UpperCAmelCase__ , **UpperCAmelCase__)
A__ = kwargs.pop('''images''' , UpperCAmelCase__)
A__ = kwargs.pop('''text''' , UpperCAmelCase__)
if len(UpperCAmelCase__) > 0:
A__ = args[0]
A__ = args[1:]
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''')
if images is not None:
A__ = self.image_processor(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__)
if text is not None:
A__ = self.tokenizer(UpperCAmelCase__ , **UpperCAmelCase__)
if text is None:
return inputs
elif images is None:
return encodings
else:
A__ = encodings['''input_ids''']
return inputs
def SCREAMING_SNAKE_CASE ( self : List[str] , *UpperCAmelCase__ : str , **UpperCAmelCase__ : Optional[Any]) ->str:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Any) ->int:
'''simple docstring'''
return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__)
@contextmanager
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Dict:
'''simple docstring'''
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your images inputs, or in a separate call.''')
A__ = True
A__ = self.tokenizer
yield
A__ = self.image_processor
A__ = False
def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : int=None) ->List[Any]:
'''simple docstring'''
if added_vocab is None:
A__ = self.tokenizer.get_added_vocab()
A__ = {}
while tokens:
A__ = re.search(R'''<s_(.*?)>''' , UpperCAmelCase__ , re.IGNORECASE)
if start_token is None:
break
A__ = start_token.group(1)
A__ = re.search(Rf"""</s_{key}>""" , UpperCAmelCase__ , re.IGNORECASE)
A__ = start_token.group()
if end_token is None:
A__ = tokens.replace(UpperCAmelCase__ , '''''')
else:
A__ = end_token.group()
A__ = re.escape(UpperCAmelCase__)
A__ = re.escape(UpperCAmelCase__)
A__ = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" , UpperCAmelCase__ , re.IGNORECASE)
if content is not None:
A__ = content.group(1).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
A__ = self.tokenajson(UpperCAmelCase__ , is_inner_value=UpperCAmelCase__ , added_vocab=UpperCAmelCase__)
if value:
if len(UpperCAmelCase__) == 1:
A__ = value[0]
A__ = value
else: # leaf nodes
A__ = []
for leaf in content.split(R'''<sep/>'''):
A__ = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
A__ = leaf[1:-2] # for categorical special tokens
output[key].append(UpperCAmelCase__)
if len(output[key]) == 1:
A__ = output[key][0]
A__ = tokens[tokens.find(UpperCAmelCase__) + len(UpperCAmelCase__) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=UpperCAmelCase__ , added_vocab=UpperCAmelCase__)
if len(UpperCAmelCase__):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Optional[int]:
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , UpperCAmelCase__ , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE ( self : Dict) ->Any:
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , UpperCAmelCase__ , )
return self.image_processor
| 14 |
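Assuming the class above mirrors transformers' DonutProcessor, its token2json-style parser turns Donut tag sequences into nested dicts (the call below downloads the donut-base processor):

from transformers import DonutProcessor

processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")
print(processor.token2json("<s_menu><s_name>Latte</s_name></s_menu>"))
# {'menu': {'name': 'Latte'}}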
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Any = logging.get_logger(__name__)
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''encoder-decoder'''
UpperCAmelCase__ = True
def __init__( self : List[str] , **UpperCAmelCase__ : Union[str, Any]) ->List[Any]:
'''simple docstring'''
super().__init__(**UpperCAmelCase__)
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
A__ = kwargs.pop('''encoder''')
A__ = encoder_config.pop('''model_type''')
A__ = kwargs.pop('''decoder''')
A__ = decoder_config.pop('''model_type''')
from ..auto.configuration_auto import AutoConfig
A__ = AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__)
A__ = AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__)
A__ = True
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , **UpperCAmelCase__ : Union[str, Any]) ->PretrainedConfig:
'''simple docstring'''
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''')
A__ = True
A__ = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]:
'''simple docstring'''
A__ = copy.deepcopy(self.__dict__)
A__ = self.encoder.to_dict()
A__ = self.decoder.to_dict()
A__ = self.__class__.model_type
return output
| 14 | 1 |
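Assuming the class above mirrors transformers' EncoderDecoderConfig, the from_encoder_decoder_configs classmethod composes two sub-configs and flips the decoder flags, exactly as logged in the method above:

from transformers import BertConfig, EncoderDecoderConfig

encoder = BertConfig(hidden_size=256, num_hidden_layers=4)
decoder = BertConfig(hidden_size=256, num_hidden_layers=4)
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder, decoder)
print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True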
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self , _lowerCamelCase=0.01 , _lowerCamelCase=1000 ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = p_stop
UpperCAmelCase__ : Any = max_length
def __iter__(self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = 0
UpperCAmelCase__ : Tuple = False
while not stop and count < self.max_length:
yield count
count += 1
UpperCAmelCase__ : Dict = random.random() < self.p_stop
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=True ):
"""simple docstring"""
UpperCAmelCase__ : int = [
BatchSamplerShard(_lowerCamelCase , 2 , _lowerCamelCase , split_batches=_lowerCamelCase , even_batches=_lowerCamelCase )
for i in range(2 )
]
        UpperCAmelCase__ : List[Any] = [list(batch_sampler_shard ) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard ) for shard in batch_sampler_shards] , [len(e ) for e in expected] )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size.
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
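
# Not part of the original test module (which is collected by pytest); added so the file
# can also be executed directly.
if __name__ == "__main__":
    unittest.main()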
"""
A subclass of `Seq2SeqTrainer` specific to question-answering tasks.
"""
import math
import time
from typing import Dict, List, Optional

from torch.utils.data import Dataset

from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(
        self, predict_dataset: Dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
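
# Minimal usage sketch (illustrative only; the model, training arguments, datasets, and the
# post-processing/metric callables are assumed to be defined elsewhere):
#
#     trainer = QuestionAnsweringSeq2SeqTrainer(
#         model=model,
#         args=training_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,
#         post_process_function=post_processing_function,
#         compute_metrics=compute_metrics,
#     )
#     metrics = trainer.evaluate(max_length=64, num_beams=4)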
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    # Score an item by counting the characters that match the target position by position.
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
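
# Worked example: evaluate("abc", "abd") returns ("abc", 2.0) because two of the three
# characters match their position in the target.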
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    # Slice both parents at a random point and swap the tails.
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
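
# Worked example (assuming the random slice point happens to be 2):
#   crossover("ABCD", "WXYZ") -> ("ABYZ", "WXCD")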
def mutate(child: str, genes: list[str]) -> str:
    # With probability MUTATION_PROBABILITY, replace one random gene of the child.
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
"""Tokenization classes for MBart."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizer(PreTrainedTokenizer):
    """Construct an MBART tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
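
# Minimal usage sketch (assumes the public "facebook/mbart-large-en-ro" checkpoint):
#
#     tokenizer = MBartTokenizer.from_pretrained(
#         "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#     )
#     batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")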
# Minimal inference with a DreamBooth-finetuned Stable Diffusion checkpoint.
import torch

from diffusers import StableDiffusionPipeline


model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
"""ALIGN model configuration"""
import copy
import os
from typing import TYPE_CHECKING, List, Union


if TYPE_CHECKING:
    pass

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
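
# Minimal usage sketch: compose an AlignConfig from the two sub-configs defined above
# (all values are the defaults).
#
#     text_config = AlignTextConfig()
#     vision_config = AlignVisionConfig()
#     config = AlignConfig.from_text_vision_configs(text_config, vision_config)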
from typing import Optional, Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict

from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)


@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads`"
                " because of a naming issue as described in"
                " https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing"
                " `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in
        # https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1],
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            use_memory_efficient_attention=self.use_memory_efficient_attention,
            dtype=self.dtype,
        )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    num_attention_heads=reversed_num_attention_heads[i],
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    dtype=self.dtype,
                )

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
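
# Minimal usage sketch (shapes are illustrative): instantiate the module and initialize
# its parameters with a PRNG key.
#
#     model = FlaxUNet2DConditionModel(sample_size=32)
#     params = model.init_weights(jax.random.PRNGKey(0))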
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
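
# Minimal two-stage usage sketch (checkpoint names and call signature are illustrative,
# not taken from this file):
#
#     prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
#     out = prior("a cat in a hat")
#     pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
#     image = pipe(
#         "a cat in a hat",
#         image_embeds=out.image_embeds,
#         negative_image_embeds=out.negative_image_embeds,
#     ).images[0]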
import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402


SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        # Avoid the interactive prompt (and its timeout) when `trust_remote_code` is required but unset.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()

            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, loading this checkpoint that requires custom code raises.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")
def __lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
try:
AutoConfig.register('''custom''' , lowerCAmelCase_ )
AutoFeatureExtractor.register(lowerCAmelCase_ , lowerCAmelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase_ ):
AutoFeatureExtractor.register(lowerCAmelCase_ , lowerCAmelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
_a = CustomFeatureExtractor.from_pretrained(lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCAmelCase_ )
_a = AutoFeatureExtractor.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def __lowerCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
class A ( _a ):
lowercase_ = True
try:
AutoConfig.register('''custom''' , lowerCAmelCase_ )
AutoFeatureExtractor.register(lowerCAmelCase_ , lowerCAmelCase_ )
# If remote code is not set, the default is to use local
_a = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
_a = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=lowerCAmelCase_ )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
_a = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=lowerCAmelCase_ )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(not hasattr(lowerCAmelCase_ , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
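# Usage sketch (illustrative, mirroring the registration tests above): wiring a
# user-defined config/feature-extractor pair into the auto classes. The base-class
# choice (SequenceFeatureExtractor) and the attribute values are assumptions.
#
#   from transformers import AutoConfig, AutoFeatureExtractor, PretrainedConfig, SequenceFeatureExtractor
#
#   class CustomConfig(PretrainedConfig):
#       model_type = "custom"
#
#   class CustomFeatureExtractor(SequenceFeatureExtractor):
#       def __init__(self, feature_size=1, sampling_rate=16000, padding_value=0.0, **kwargs):
#           super().__init__(feature_size=feature_size, sampling_rate=sampling_rate,
#                            padding_value=padding_value, **kwargs)
#
#   AutoConfig.register("custom", CustomConfig)
#   AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
#   # AutoFeatureExtractor.from_pretrained(...) can now resolve model_type "custom".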
| 179 | 1 |
"""simple docstring"""
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Compute the most likely sequence of hidden states for the given observations."""
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")


if __name__ == "__main__":
    from doctest import testmod

    testmod()
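    # Illustrative example (not part of the original module): the classic two-state
    # "healthy/fever" HMM; the probabilities are invented demonstration values.
    observations = ["normal", "cold", "dizzy"]
    states = ["healthy", "fever"]
    start_p = {"healthy": 0.6, "fever": 0.4}
    trans_p = {
        "healthy": {"healthy": 0.7, "fever": 0.3},
        "fever": {"healthy": 0.4, "fever": 0.6},
    }
    emit_p = {
        "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    print(viterbi(observations, states, start_p, trans_p, emit_p))
    # expected output: ['healthy', 'healthy', 'fever']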
| 260 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
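# Usage sketch (illustrative): building a config with the defaults above and reading
# the ONNX input spec; the values in the comments simply restate what the code returns.
#
#   config = PoolFormerConfig()
#   print(config.hidden_sizes)       # [64, 128, 320, 512]
#   onnx_config = PoolFormerOnnxConfig(config)
#   print(dict(onnx_config.inputs))  # {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}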
| 260 | 1 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]


@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)

            builder_cls = import_main_class(dataset_module.module_path, dataset=True)

            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )

            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))


@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds


@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
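# Usage sketch (illustrative): the configs listed in DATASETS_ON_HF_GCP can also be
# streamed through the public API; "20220301.simple" is one of the entries above.
#
#   from datasets import load_dataset
#
#   ds = load_dataset("wikipedia", "20220301.simple", split="train", streaming=True)
#   print(next(iter(ds))["title"])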
| 167 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
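# Sketch of a concrete subcommand (illustrative; EnvCommand and the "env" name are
# invented here). `parser` is the subparsers action handed in by the CLI entry point.
#
#   class EnvCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           env_parser = parser.add_parser("env", help="Print environment information.")
#           env_parser.set_defaults(func=lambda args: EnvCommand())
#
#       def run(self):
#           print("environment info goes here")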
| 167 | 1 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
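# Usage sketch (illustrative): attribute_map lets the generic names used elsewhere in
# the library resolve to the GPT-style field names of this config.
#
#   config = TrajectoryTransformerConfig()
#   assert config.hidden_size == config.n_embd == 128
#   assert config.num_attention_heads == config.n_head == 4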
| 160 |
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """
    This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments.
    """

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
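# Usage sketch (illustrative): a dataclass whose fields become CLI flags; TrainingArgs
# and its fields are invented for the example.
#
#   @dataclasses.dataclass
#   class TrainingArgs:
#       learning_rate: float = HfArg(default=3e-4, help="Peak learning rate.", aliases=["--lr"])
#       do_eval: bool = False
#
#   parser = HfArgumentParser(TrainingArgs)
#   (training_args,) = parser.parse_args_into_dataclasses(["--lr", "1e-4", "--do_eval"])
#   assert training_args.learning_rate == 1e-4 and training_args.do_eval is True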
| 160 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads`"
                " because of a naming issue as described in"
                " https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing"
                " `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in
        # https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1],
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            use_memory_efficient_attention=self.use_memory_efficient_attention,
            dtype=self.dtype,
        )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    num_attention_heads=reversed_num_attention_heads[i],
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    dtype=self.dtype,
                )

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
    def __call__(
        self,
        sample: jnp.ndarray,
        timesteps: Union[jnp.ndarray, float, int],
        encoder_hidden_states: jnp.ndarray,
        down_block_additional_residuals: Tuple[jnp.ndarray, ...] = None,
        mid_block_additional_residual: jnp.ndarray = None,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
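# Usage sketch (illustrative): initializing parameters and running one forward pass.
# Shapes follow the defaults above (4 latent channels, 32x32 sample, 1280-dim context);
# the 77-token context length is an assumption borrowed from CLIP-style encoders.
#
#   import jax
#   import jax.numpy as jnp
#
#   model = FlaxUNet2DConditionModel()
#   params = model.init_weights(jax.random.PRNGKey(0))
#   sample = jnp.zeros((1, 4, 32, 32))
#   timesteps = jnp.array([10], dtype=jnp.int32)
#   context = jnp.zeros((1, 77, 1280))
#   out = model.apply({"params": params}, sample, timesteps, context)
#   print(out.sample.shape)  # (1, 4, 32, 32)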
| 346 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
lowerCAmelCase_ : Tuple = [
'Timeout',
'BaseFileLock',
'WindowsFileLock',
'UnixFileLock',
'SoftFileLock',
'FileLock',
]
__version__ = "3.0.12"

_logger = None
def logger():
    """Returns the logger instance used in this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        #: The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy(object):
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)

        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path: str, max_length: int) -> str:
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the :func:`msvcrt.locking` function to hard lock the lock file on windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
#: Alias for the lock that should be used on the current platform.
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
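# Usage sketch (illustrative): FileLock resolves to the strongest mechanism available
# on the current platform; the path below is an arbitrary example.
#
#   lock = FileLock("/tmp/my_resource.txt.lock", timeout=5)
#   with lock:
#       ...  # exclusive access to the guarded resource here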
| 346 | 1 |