"""
Calculate the Shannon entropy of a piece of text, both over single characters
and over adjacent character pairs, using H = -sum(p * log2(p)).
"""
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each two-character sequence over the alphabet, calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[Counter, Counter]:
    """Count single characters and pairs of adjacent characters in the text."""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have a space at the start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main():
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
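# A worked example (a sketch: the three printed values are the rounded first-order
# entropy H1, the second-order entropy H2, and the difference H2 - H1, so the
# exact numbers depend entirely on the input text):
#
#     calculate_prob("the quick brown fox jumps over the lazy dog")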
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    r"""
    Constructs a BLIP processor which wraps a BERT tokenizer and a BLIP image processor into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
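# A minimal usage sketch for the processor above (hedged: the checkpoint name is
# illustrative; any Hub repository that ships a BLIP processor config works):
#
#     from PIL import Image
#     from transformers import BlipProcessor
#
#     processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#     inputs = processor(images=Image.open("cat.png"), text="a photo of", return_tensors="pt")
#     # `inputs` merges pixel_values from the image processor with input_ids and
#     # attention_mask from the tokenizer into a single BatchEncoding.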
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                f"`rope_scaling` must be a dictionary with two fields, `type` and `factor`, got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
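# Usage sketch (hedged: constructing a config directly like this yields a
# randomly initialized model when passed to a model class, not pretrained weights):
#
#     config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
#     # passes _rope_scaling_validation; a type outside {"linear", "dynamic"} or
#     # a factor <= 1.0 raises a ValueError.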
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image

from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape


def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name


def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


# We verify the conversion on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    """
    Copy/paste/tweak the original checkpoint's weights into our DPT structure.
    """
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1),
                size=(image.size[1], image.size[0]),
                mode="bicubic",
                align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )

        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )
    parser.add_argument(
        "--show_prediction",
        action="store_true",
    )

    args = parser.parse_args()
    convert_dpt_checkpoint(
        args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
    )
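# Example invocation (a sketch: since the script calls torch.load directly on
# --checkpoint_url, a locally downloaded .pt file such as the hybrid MiDaS
# checkpoint from the DPT release page is the assumed input):
#
#     python convert_dpt_hybrid_to_pytorch.py \
#         --checkpoint_url dpt_hybrid-midas-501f0c75.pt \
#         --pytorch_dump_folder_path ./dpt-hybrid-midas --show_prediction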
"""simple docstring"""
def snake_case ( A__ ,A__ ,A__ ,A__ ):
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = len(A__ ), len(grid[0] )
if (
min(A__ ,A__ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
UpperCAmelCase_ : Tuple = 0
count += depth_first_search(A__ ,row + 1 ,A__ ,A__ )
count += depth_first_search(A__ ,row - 1 ,A__ ,A__ )
count += depth_first_search(A__ ,A__ ,col + 1 ,A__ )
count += depth_first_search(A__ ,A__ ,col - 1 ,A__ )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
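# Worked example (0 = free cell, 1 = obstacle, as assumed by the function above):
#
#     grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
#     print(depth_first_search(grid, 0, 0, set()))  # 2: around the obstacle, over the top or down the side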
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")
    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name


def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict


# We verify the conversion on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_mobilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original checkpoint's weights into our MobileViT structure.
    """
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(checkpoint, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--mobilevit_name",
        default="mobilevit_s",
        type=str,
        help=(
            "Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
            " 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_mobilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
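# Example invocation (a sketch; the .pt checkpoint is assumed to come from
# Apple's ml-cvnets release of MobileViT, and the script name assumes this file
# is saved as convert_mlcvnets_to_pytorch.py):
#
#     python convert_mlcvnets_to_pytorch.py \
#         --mobilevit_name mobilevit_s \
#         --checkpoint_path mobilevit_s.pt \
#         --pytorch_dump_folder_path ./mobilevit-small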
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
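# These launchers are normally exercised through pytest (a sketch; the path
# assumes the accelerate repository layout):
#
#     python -m pytest tests/test_metrics.py -q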
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PretrainedConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_when_same_dtype(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
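# A standalone sketch of the timm/transformers dispatch the equivalence test
# relies on (hedged: needs network access and the `timm` package installed):
#
#     from transformers import AutoBackbone
#
#     backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True)
#     print(backbone.out_indices)  # (-1,): the last stage, since timm layer counts aren't known up front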
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list[int, int]): range to sample the target short edge from.
            max_size (int): maximum allowed longest edge length.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs


class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx


def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
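# Usage sketch (hedged: `cfg` stands in for the detectron2-style config object
# this research example loads elsewhere, and the image path is illustrative):
#
#     preprocess = Preprocess(cfg)
#     images, sizes, scales_yx = preprocess("cat.png", single_image=True)
#     # boxes predicted on `images` can be mapped back to the raw image with
#     # _scale_box(boxes, scales_yx) and then clipped with _clip_box(boxes, raw_hw).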
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several datasets (sources) into a single dataset, alternating between the sources to pick examples."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Converts a list of `Dataset` (or `IterableDataset`) objects with the same schema into a single dataset."""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
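# Usage sketch (hedged: the probabilities and column values are illustrative):
#
#     from datasets import Dataset
#
#     d1 = Dataset.from_dict({"a": [0, 1, 2]})
#     d2 = Dataset.from_dict({"a": [10, 11, 12]})
#     mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42,
#                                 stopping_strategy="all_exhausted")
#     joined = concatenate_datasets([d1, d2])  # 6 rows, d1's rows first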
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowercase_ : Dict = 16
lowercase_ : int = 32
def a__ ( snake_case , snake_case = 16 ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
__SCREAMING_SNAKE_CASE : Optional[int] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(snake_case ):
# max_length=None => use the model max length (it's actually the default)
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__snake_case , max_length=__snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
    )
    # We also rename the 'label' column to 'labels', which is the expected name for labels by the models of the
    # transformers library.
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16.
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator.
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function.
        # We need to bring in the Accelerator object from earlier.
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also controls new weight initialization).
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the
        # optimizer creation, otherwise training will not work on TPU (`accelerate` will kindly throw an error
        # to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )
        # Prepare everything.
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave
        # them to the prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments.
    # Note: you could also refactor this outside of your training loop function.
    inner_training_loop()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
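
# A minimal, self-contained sketch of the `find_executable_batch_size` behaviour used above.
# The decorator re-runs the wrapped function, halving `batch_size` whenever an out-of-memory
# error escapes it. `_maybe_oom` below is a made-up stand-in for real GPU memory pressure,
# not part of the accelerate API.
from accelerate.utils import find_executable_batch_size


def _maybe_oom(batch_size):
    if batch_size > 16:  # pretend anything above 16 exhausts memory
        raise RuntimeError("CUDA out of memory.")


@find_executable_batch_size(starting_batch_size=128)
def _demo_loop(batch_size):
    _maybe_oom(batch_size)
    return batch_size


# Calling _demo_loop() tries 128, 64 and 32 (each "OOMs"), then succeeds and returns 16.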
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Prepare the plaintext by up-casing it and separating repeated letters with X's."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
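
# A quick round-trip sketch for the Playfair routines above; key and message are arbitrary examples.
if __name__ == "__main__":
    key = "MONARCHY"
    message = "HIDE THE GOLD"
    ciphertext = encode(message, key)
    print(ciphertext)
    print(decode(ciphertext, key))  # "HIDETHEGOLDX" (a trailing X pads the text to even length)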
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-sat-base-100h-libri-ft": (
        "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}


class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
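
# A short usage sketch: instantiating the config with defaults and overriding two fields.
# Purely illustrative; the override values below are arbitrary.
def _demo_unispeech_sat_config() -> None:
    config = UniSpeechSatConfig(hidden_size=1024, num_hidden_layers=24)
    print(config.num_feat_extract_layers)  # 7, derived from len(conv_dim)
    print(config.inputs_to_logits_ratio)   # 320 = 5*2*2*2*2*2*2, the total conv stride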
from __future__ import annotations


def ceil_index(v: list[int], left: int, right: int, key: int) -> int:
    """Binary search for the smallest index in v[left+1 .. right] whose value is >= key."""
    while right - left > 1:
        middle = (left + right) // 2
        if v[middle] >= key:
            right = middle
        else:
            left = middle
    return right


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # New smallest value: it starts a fresh candidate subsequence of length 1.
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far.
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the smallest tail element that is >= v[i].
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
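
# A small usage sketch for the O(n log n) LIS length above; the input list is an arbitrary example.
def _demo_lis() -> None:
    data = [10, 9, 2, 5, 3, 7, 101, 18]
    # Longest strictly increasing subsequences include [2, 3, 7, 18] and [2, 5, 7, 101].
    print(longest_increasing_subsequence_length(data))  # prints 4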
import unittest

from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin


if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor


class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = '<HTML>\n\n    <HEAD>\n    <TITLE>sample document</TITLE>\n    </HEAD>\n\n    <BODY BGCOLOR="FFFFFF">\n    <HR>\n    <a href="http://google.com">Goog</a>\n    <H1>This is one header</H1>\n    <H2>This is a another Header</H2>\n    <P>Travel from\n    <P>\n    <B>SFO to JFK</B>\n    <BR>\n    <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n    <HR>\n    <div style="color:#0000FF">\n    <h3>Traveler <b> name </b> is\n    <p> John Doe </p>\n    </div>'

    html_string_2 = '\n    <!DOCTYPE html>\n    <html>\n    <body>\n\n    <h1>My First Heading</h1>\n    <p>My first paragraph.</p>\n\n    </body>\n    </html>\n    '

    return [html_string_1, html_string_2]


@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
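
# Example invocation of the conversion script (the script name and paths below are
# placeholders, not from the original):
#   python convert_xglm_checkpoint.py /path/to/fairseq/model.pt /path/to/output_dir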
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus

    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
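
# A tiny illustration of the rolling-hash identity the search loop relies on: dropping the
# leading character and appending the next one must give the same value as hashing the new
# window from scratch. The window text below is an arbitrary example.
def _demo_rolling_hash() -> None:
    window, nxt = "abc", "d"
    h = 0
    for c in window:
        h = (ord(c) + h * alphabet_size) % modulus
    power = pow(alphabet_size, len(window) - 1, modulus)
    rolled = ((h - ord(window[0]) * power) * alphabet_size + ord(nxt)) % modulus
    fresh = 0
    for c in window[1:] + nxt:
        fresh = (ord(c) + fresh * alphabet_size) % modulus
    assert rolled == fresh  # hash("bcd") computed both ways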
"""simple docstring"""
from string import ascii_lowercase, ascii_uppercase
def lowercase ( _snake_case : Tuple ) ->Optional[int]:
"""simple docstring"""
if not sentence:
return ""
__snake_case : List[str] = dict(zip(lowerCAmelCase_ , lowerCAmelCase_ ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
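
# A quick demo of capitalize(); note it only maps ASCII letters, so a leading digit or
# non-ASCII character is returned unchanged.
def _demo_capitalize() -> None:
    print(capitalize("hello world"))  # "Hello world"
    print(capitalize("123 hello"))    # "123 hello" (unchanged)
    print(capitalize(""))             # ""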
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_=None , a_=None , a_=None , a_="resnet50" , a_=3 , a_=32 , a_=3 , a_=True , a_=True , ):
'''simple docstring'''
__snake_case : List[Any] = parent
__snake_case : Tuple = out_indices if out_indices is not None else [4]
__snake_case : Optional[Any] = stage_names
__snake_case : str = out_features
__snake_case : List[str] = backbone
__snake_case : Optional[int] = batch_size
__snake_case : Optional[int] = image_size
__snake_case : str = num_channels
__snake_case : Optional[int] = use_pretrained_backbone
__snake_case : Optional[int] = is_training
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : Optional[int] = self.get_config()
return config, pixel_values
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
__snake_case : List[str] = TimmBackbone(config=a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
__snake_case : int = model(a_ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = self.prepare_config_and_inputs()
__snake_case , __snake_case : List[Any] = config_and_inputs
__snake_case : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PretrainedConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_when_same_tied_weights(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_attention_outputs(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
import unittest

import torch
from torch import nn

from diffusers.models.activations import get_activation


class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
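
# A tiny standalone sketch of the helper under test: fetch an activation by name and
# apply it to a small tensor, outside the unittest harness.
def _demo_get_activation() -> None:
    act = get_activation("swish")           # same lookup the tests above use
    x = torch.linspace(-2.0, 2.0, steps=5)
    print(act(x))                           # SiLU: x * sigmoid(x), applied elementwise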
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
a : Any = [8, 5, 9, 7]
a : Optional[int] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
a : Union[str, Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , __lowercase , __lowercase , __lowercase , ):
UpperCAmelCase__ = claim_vector
UpperCAmelCase__ = allocated_resources_table
UpperCAmelCase__ = maximum_claim_table
def A__ ( self ):
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def A__ ( self ):
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def A__ ( self ):
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__lowercase ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def A__ ( self ):
return {self.__need().index(__lowercase ): i for i in self.__need()}
def A__ ( self , **__lowercase ):
UpperCAmelCase__ = self.__need()
UpperCAmelCase__ = self.__allocated_resources_table
UpperCAmelCase__ = self.__available_resources()
UpperCAmelCase__ = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("""_""" * 50 + """\n""" )
while need_list:
UpperCAmelCase__ = False
for each_need in need_list:
UpperCAmelCase__ = True
for index, need in enumerate(__lowercase ):
if need > available_resources[index]:
UpperCAmelCase__ = False
break
if execution:
UpperCAmelCase__ = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
UpperCAmelCase__ = original_need_index
print(F'''Process {process_number + 1} is executing.''' )
# remove the process run from stack
need_list.remove(__lowercase )
# update available/freed resources stack
UpperCAmelCase__ = np.array(__lowercase ) + np.array(
alloc_resources_table[process_number] )
print(
"""Updated available resource stack for processes: """
+ """ """.join([str(__lowercase ) for x in available_resources] ) )
break
if safe:
print("""The process is in a safe state.\n""" )
else:
print("""System in unsafe state. Aborting...\n""" )
break
def A__ ( self ):
print(""" """ * 9 + """Allocated Resource Table""" )
for item in self.__allocated_resources_table:
print(
F'''P{self.__allocated_resources_table.index(__lowercase ) + 1}'''
+ """ """.join(F'''{it:>8}''' for it in item )
+ """\n""" )
print(""" """ * 9 + """System Resource Table""" )
for item in self.__maximum_claim_table:
print(
F'''P{self.__maximum_claim_table.index(__lowercase ) + 1}'''
+ """ """.join(F'''{it:>8}''' for it in item )
+ """\n""" )
print(
"""Current Usage by Active Processes: """
+ """ """.join(str(__lowercase ) for x in self.__claim_vector ) )
print(
"""Initial Available Resources: """
+ """ """.join(str(__lowercase ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
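
# A minimal usage sketch running the simulation on the module-level test tables above;
# any truthy keyword (here `describe=True`) also prints the pretty-formatted tables first.
def _demo_bankers_algorithm() -> None:
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)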
"""simple docstring"""
import math
a : str = 10
a : List[Any] = 7
a : Tuple = BALLS_PER_COLOUR * NUM_COLOURS
def snake_case__ ( _SCREAMING_SNAKE_CASE = 2_0 ) ->str:
UpperCAmelCase__ = math.comb(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = math.comb(NUM_BALLS - BALLS_PER_COLOUR , _SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = NUM_COLOURS * (1 - missing_colour / total)
return F'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
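
# A quick Monte Carlo cross-check of the closed form above; the trial count is arbitrary.
def _simulate(taken: int = 20, trials: int = 100_000) -> float:
    import random

    balls = [colour for colour in range(NUM_COLOURS) for _ in range(BALLS_PER_COLOUR)]
    seen = 0
    for _ in range(trials):
        seen += len(set(random.sample(balls, taken)))
    return seen / trials  # should land close to float(solution(taken))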
from collections import defaultdict


def dfs(start: int) -> int:
    """Return the size of the subtree rooted at `start`, recording `start` as a
    cut candidate whenever that subtree has an even number of nodes."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    """
    2 1
    3 1
    4 3
    5 2
    6 1
    7 2
    8 6
    9 8
    10 8
    On removing edges (1, 3) and (1, 6), we can get the desired result 2.
    """
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    # The root's own (even-sized) subtree is counted too, so subtract one.
    print(len(cuts) - 1)
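
# A reusable variant of the script above: the same even-subtree counting, but taking the
# edge list as a parameter instead of module-level globals. Names here are my own, not
# from the original script.
def max_even_forest_cuts(edge_list: list[tuple[int, int]], root: int = 1) -> int:
    adjacency: defaultdict[int, list[int]] = defaultdict(list)
    for a, b in edge_list:
        adjacency[a].append(b)
        adjacency[b].append(a)
    seen: set[int] = set()

    def subtree(node: int) -> tuple[int, int]:
        # Returns (subtree size, count of even-sized subtrees at or below `node`).
        seen.add(node)
        size, evens = 1, 0
        for nxt in adjacency[node]:
            if nxt not in seen:
                s, e = subtree(nxt)
                size += s
                evens += e
        return size, evens + (1 if size % 2 == 0 else 0)

    _, evens = subtree(root)
    return evens - 1  # the whole tree counts itself once; the rest are valid cuts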
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
TEST_FLUSH = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
TEST_STRAIGHT = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]),
("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
TEST_TYPES = (
("""JH AH TH KH QH""", 23),
("""JH 9H TH KH QH""", 22),
("""JC KH JS JD JH""", 21),
("""KH KC 3S 3H 3D""", 20),
("""8C 9C 5C 3C TC""", 19),
("""JS QS 9H TS KH""", 18),
("""7C 7S KH 2H 7H""", 17),
("""3C KH 5D 5S KH""", 16),
("""QH 8H KD JH 8S""", 15),
("""2D 6D 9D TH 7D""", 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MgpstrProcessor, ViTImageProcessor


@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a single random PIL image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_proc(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(
            list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"]
        )
import argparse

import torch

from transformers import (
    EncodecConfig,
    EncodecFeatureExtractor,
    EncodecModel,
    logging,
)


# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th

logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")

MAPPING_QUANTIZER = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
MAPPING_ENCODER = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
MAPPING_ENCODER_48K = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
MAPPING_DECODER = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
MAPPING_DECODER_48K = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively( hf_pointer, key, value, full_name, weight_type ):
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer, attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def should_ignore( name, ignore_keys ):
    for key in ignore_keys:
        if key.endswith(".*" ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*." )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights( orig_dict, hf_model, model_name ):
    unused_weights = []
    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(F'''Unsupported model: {model_name}''' )
    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS ):
            logger.info(F'''{name} was ignored''' )
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*." )
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed" ) and name.endswith("embed_avg" ):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key )[0].split("." )[-2]
                    mapped_key = mapped_key.replace("*", layer_index )
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type )
            continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def convert_checkpoint( model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path )
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2_048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(F'''Unknown model name: {model_name}''' )
    model = EncodecModel(config )
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
    original_checkpoint = torch.load(checkpoint_path )
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("Pushing to the hub..." )
        feature_extractor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
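# A typical invocation of this conversion script looks like the sketch below.
# The filename and the paths are hypothetical placeholders; the checkpoint must
# come from the original EnCodec release, and --push_to_hub is optional:
#
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz.th \
#       --pytorch_dump_folder_path ./encodec_24khz_converted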
| 691 | 1 |
def _UpperCAmelCase ( input_num : int ):
    """simple docstring"""
    if not isinstance(input_num , int ):
        raise ValueError("""Input must be an integer""" )
    if input_num <= 0:
raise ValueError("""Input must be positive""" )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
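# Worked example: the function sums proper divisors up to input_num // 2, so
# for 28 (divisors 1, 2, 4, 7 and 14) it returns 1 + 2 + 4 + 7 + 14 = 28,
# which makes 28 a perfect number.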
| 519 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = '__DUMMY_TRANSFORMERS_USER__'
CI_HUB_USER_FULL_NAME = 'Dummy User'
CI_HUB_USER_TOKEN = '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'
CI_HUB_ENDPOINT = 'https://hub-ci.huggingface.co'
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}'
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}'
CI_HUB_TOKEN_PATH = Path('~/.huggingface/hub_ci_token').expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        """huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE )
@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , CI_HUB_ENDPOINT )
    monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , CI_HUB_DATASETS_URL )
@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , CI_HUB_TOKEN_PATH )
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config , ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN )
    yield
    HfFolder.delete_token()
@pytest.fixture(scope="""session""" )
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT )
@pytest.fixture(scope="""session""" )
def hf_token(hf_api: HfApi):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN )
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token )
@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id: str ):
        hf_api.delete_repo(repo_id , token=CI_HUB_USER_TOKEN , repo_type="""dataset""" )

    return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id ):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id )

    return _temporary_repo
@pytest.fixture(scope="""session""" )
def hf_private_dataset_repo_txt_data_(hf_api: HfApi , hf_token , text_file ):
    repo_name = f"""repo_txt_data-{int(time.time() * 10e3 )}"""
    repo_id = f"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(repo_id , token=hf_token , repo_type="""dataset""" , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(text_file ) , path_in_repo="""data/text_data.txt""" , repo_id=repo_id , repo_type="""dataset""" , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type="""dataset""" )
    except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_ , ci_hub_config , ci_hfh_hf_hub_url ):
    return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="""session""" )
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi , hf_token , zip_csv_with_dir_path ):
    repo_name = f"""repo_zipped_txt_data-{int(time.time() * 10e3 )}"""
    repo_id = f"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(repo_id , token=hf_token , repo_type="""dataset""" , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(zip_csv_with_dir_path ) , path_in_repo="""data.zip""" , repo_id=repo_id , repo_type="""dataset""" , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type="""dataset""" )
    except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_ , ci_hub_config , ci_hfh_hf_hub_url ):
    return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="""session""" )
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi , hf_token , zip_image_path ):
    repo_name = f"""repo_zipped_img_data-{int(time.time() * 10e3 )}"""
    repo_id = f"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(repo_id , token=hf_token , repo_type="""dataset""" , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(zip_image_path ) , path_in_repo="""data.zip""" , repo_id=repo_id , repo_type="""dataset""" , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type="""dataset""" )
    except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_ , ci_hub_config , ci_hfh_hf_hub_url ):
    return hf_private_dataset_repo_zipped_img_data_
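# A minimal sketch of how these fixtures compose in a test (hypothetical test
# function; pytest injects fixtures by parameter name, and the repo is deleted
# again once the session ends):
#
#   def test_load_private_text(hf_private_dataset_repo_txt_data, hf_token):
#       assert hf_private_dataset_repo_txt_data.startswith(CI_HUB_USER)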
| 519 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __a ( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ["""pixel_values"""]
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BICUBIC , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , do_convert_rgb = True , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name='crop_size' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self , image , size , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ):
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        output_size = get_resize_output_image_size(image , size=size['shortest_edge'] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
        return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , do_convert_rgb = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name='size' , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='crop_size' , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 718 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
_DESCRIPTION = '\\nBLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
_KWARGS_DESCRIPTION = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n'
CHECKPOINT_URLS = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
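# For reference, a minimal usage sketch (mirrors the docstring above; older
# `datasets` versions expose `load_metric`, and the checkpoint is downloaded on
# first use, so this needs network access):
#
#   bleurt = datasets.load_metric("bleurt", "bleurt-tiny-128")
#   results = bleurt.compute(predictions=["hello there"], references=["hello there"])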
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __a ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/google-research/bleurt' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/bleurt'] , reference_urls=['https://github.com/google-research/bleurt', 'https://arxiv.org/abs/2004.04696'] , )
    def _download_and_prepare( self , dl_manager ):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                'Using default BLEURT-Base checkpoint for sequence maximum length 128. '
                'You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').' )
            checkpoint_name = 'bleurt-base-128'
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                F'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' )
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        self.scorer = score.BleurtScorer(os.path.join(model_path , checkpoint_name ) )
    def _compute( self , predictions , references ):
        scores = self.scorer.score(references=references , candidates=predictions )
        return {"scores": scores}
| 97 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_trajectory_transformer'] = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
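# With this pattern, importing the package stays cheap: the `_LazyModule`
# placeholder defers the torch-heavy `modeling_trajectory_transformer` import
# until an attribute is first accessed, while the TYPE_CHECKING branch keeps
# static type checkers aware of the real symbols. A sketch, with `pkg` standing
# in for wherever this __init__.py lives:
#
#   from pkg import TrajectoryTransformerConfig   # no torch import yet
#   model_cls = pkg.TrajectoryTransformerModel    # triggers the real import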
| 15 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class _snake_case :
def __init__( self , a , a=13 , a=7 , a=True , a=True , a=False , a=True , a=99 , a=32 , a=5 , a=4 , a=37 , a="gelu" , a=0.1 , a=0.1 , a=512 , a=16 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length])
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices)
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , use_stable_embedding=a , )
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a) -> Any:
SCREAMING_SNAKE_CASE = OpenLlamaModel(config=a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a)
SCREAMING_SNAKE_CASE = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> str:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaModel(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , )
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , )
SCREAMING_SNAKE_CASE = model(a , attention_mask=a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> int:
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a , a , a , a , a , a , ) -> str:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=a)
model.to(a)
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , )
SCREAMING_SNAKE_CASE = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size)
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1)
SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1)
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )['hidden_states'][0]
SCREAMING_SNAKE_CASE = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1]).item()
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1E-3))
def SCREAMING_SNAKE_CASE__ ( self) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _snake_case ( A__ , A__ , A__ , unittest.TestCase ):
_lowercase : List[Any] = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
_lowercase : str = (OpenLlamaForCausalLM,) if is_torch_available() else ()
_lowercase : List[str] = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowercase : List[str] = False
_lowercase : Optional[int] = False
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE = OpenLlamaModelTester(self)
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=a , hidden_size=37)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a)
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'single_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a)
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'multi_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1).to(a)
SCREAMING_SNAKE_CASE = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(a)
model.to(a)
model.eval()
SCREAMING_SNAKE_CASE = model(a , attention_mask=a , labels=a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test')
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
pass
@parameterized.expand([('linear',), ('dynamic',)])
    def test_model_rope_scaling_from_config( self , scaling_type) -> Dict:
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] , config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
        set_seed(42) # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42) # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1E-5))
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1E-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1E-5))
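    # For context, the rope_scaling dict exercised above has this shape (a
    # config-level sketch matching the parameterized cases "linear"/"dynamic"):
    #
    #   config.rope_scaling = {"type": "dynamic", "factor": 10.0}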
| 73 | 0 |
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
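# In short, `is_safetensors_compatible` asks: for every torch `.bin` weight file
# in the listing, is there a matching `.safetensors` counterpart (per pipeline
# component, and per variant such as "fp16", where filenames look like
# `unet/diffusion_pytorch_model.fp16.safetensors`)? A quick behavioural sketch:
#
#   is_safetensors_compatible(
#       ["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"]
#   )  # -> True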
class lowerCAmelCase__ ( unittest.TestCase ):
    def test_all_is_compatible( self ):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )
    def test_diffusers_model_is_compatible( self ):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )
    def test_diffusers_model_is_not_compatible( self ):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames ) )
    def test_transformer_model_is_compatible( self ):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )
    def test_transformer_model_is_not_compatible( self ):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames ) )
    def test_all_is_compatible_variant( self ):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_diffusers_model_is_compatible_variant( self ):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_diffusers_model_is_compatible_variant_partial( self ):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_diffusers_model_is_not_compatible_variant( self ):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )
    def test_transformer_model_is_compatible_variant( self ):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_transformer_model_is_compatible_variant_partial( self ):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )
    def test_transformer_model_is_not_compatible_variant( self ):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )
| 706 |
"""simple docstring"""
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("""Googling.....""")
__UpperCAmelCase ="""https://www.google.com/search?q=""" + """ """.join(sys.argv[1:])
__UpperCAmelCase =requests.get(url, headers={"""UserAgent""": UserAgent().random})
# res.raise_for_status()
with open("""project1a.html""", """wb""") as out_file: # only for knowing the class
for data in res.iter_content(1_0000):
out_file.write(data)
__UpperCAmelCase =BeautifulSoup(res.text, """html.parser""")
__UpperCAmelCase =list(soup.select(""".eZt8xd"""))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("""href"""))
else:
webbrowser.open(F'''https://google.com{link.get("href")}''') | 261 | 0 |
import base64


def baseaa_encode(string: str) -> bytes:
    return base64.b64encode(string.encode("utf-8" ) )


def baseaa_decode(encoded: bytes) -> str:
    return base64.b64decode(encoded ).decode("utf-8" )


if __name__ == "__main__":
    test = """Hello World!"""
    encoded = baseaa_encode(test)
    print(encoded)

    decoded = baseaa_decode(encoded)
    print(decoded)
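# Expected round-trip output for the example above:
#
#   b'SGVsbG8gV29ybGQh'
#   Hello World!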
| 629 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class A__ :
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=64 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ):
'''simple docstring'''
UpperCamelCase : Dict = parent
UpperCamelCase : int = batch_size
UpperCamelCase : Dict = seq_length
UpperCamelCase : Union[str, Any] = is_training
UpperCamelCase : int = use_input_mask
UpperCamelCase : Optional[Any] = use_token_type_ids
UpperCamelCase : Optional[Any] = use_labels
UpperCamelCase : str = vocab_size
UpperCamelCase : Union[str, Any] = hidden_size
UpperCamelCase : Any = embedding_size
UpperCamelCase : List[Any] = num_hidden_layers
UpperCamelCase : Optional[int] = num_attention_heads
UpperCamelCase : Optional[Any] = intermediate_size
UpperCamelCase : Dict = hidden_act
UpperCamelCase : Union[str, Any] = hidden_dropout_prob
UpperCamelCase : Optional[Any] = attention_probs_dropout_prob
UpperCamelCase : List[str] = max_position_embeddings
UpperCamelCase : List[Any] = type_vocab_size
UpperCamelCase : Any = type_sequence_label_size
UpperCamelCase : Optional[int] = initializer_range
UpperCamelCase : str = num_labels
UpperCamelCase : List[str] = num_choices
UpperCamelCase : Tuple = scope
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase : List[str] = None
if self.use_input_mask:
UpperCamelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase : Tuple = None
if self.use_token_type_ids:
UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase : Dict = None
UpperCamelCase : Union[str, Any] = None
UpperCamelCase : Dict = None
if self.use_labels:
UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase( self ):
'''simple docstring'''
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[int] = MegatronBertModel(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : Optional[Any] = model(A_ , attention_mask=A_ , token_type_ids=A_ )
UpperCamelCase : Dict = model(A_ , token_type_ids=A_ )
UpperCamelCase : Union[str, Any] = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : List[str] = MegatronBertForMaskedLM(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : Optional[int] = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : int = MegatronBertForCausalLM(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : Dict = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : int = MegatronBertForNextSentencePrediction(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : Dict = model(
A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[int] = MegatronBertForPreTraining(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : str = model(
A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , next_sentence_label=A_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : str = MegatronBertForQuestionAnswering(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : Union[str, Any] = model(
A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : List[str] = self.num_labels
UpperCamelCase : Optional[int] = MegatronBertForSequenceClassification(A_ )
model.to(A_ )
model.eval()
UpperCamelCase : List[Any] = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.num_labels
UpperCamelCase : List[str] = MegatronBertForTokenClassification(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : List[str] = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : int = self.num_choices
UpperCamelCase : int = MegatronBertForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase : Tuple = model(
A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class A__ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCAmelCase :Tuple = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
_UpperCAmelCase :Optional[Any] = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase :Optional[Any] = True
# test_resize_embeddings = False
_UpperCAmelCase :Optional[Any] = False
def __UpperCamelCase( self , A_ , A_ , A_=False ):
'''simple docstring'''
UpperCamelCase : Any = super()._prepare_for_class(A_ , A_ , return_labels=A_ )
if return_labels:
if model_class in get_values(A_ ):
UpperCamelCase : str = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=A_ )
UpperCamelCase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A_ )
return inputs_dict
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = MegatronBertModelTester(self )
UpperCamelCase : Optional[int] = ConfigTester(self , config_class=A_ , hidden_size=37 )
def __UpperCamelCase( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*A_ )
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
@slow
@unittest.skip("Model is not available." )
def __UpperCamelCase( self ):
'''simple docstring'''
        model_name = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            model_name = os.path.join(os.environ["MYDIR"] , model_name )
        model = MegatronBertModel.from_pretrained(model_name )
        model.to(torch_device )
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 1024) )
        self.assertEqual(output.shape , expected_shape )
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3 ):
            for jj in range(3 ):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii , jj , a , b )
                self.assertTrue(math.isclose(a , b , rel_tol=TOLERANCE , abs_tol=TOLERANCE ) , msg=msg )
| 629 | 1 |
"""simple docstring"""
import math
def malus_law(initial_intensity: float , angle: float) -> float:
    if initial_intensity < 0:
        raise ValueError("""The value of intensity cannot be negative""" )
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="""malus_law""")
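# Worked example: an analyser at 60 degrees transmits cos²(60°) = 0.25 of the
# incident intensity, so (up to floating point error):
#
#   malus_law(100.0, 60.0)  # -> ~25.0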
| 507 |
"""simple docstring"""
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("""/""" ):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"""https://huggingface.co/{ckpt_name}"""
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 507 | 1 |
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # the input must contain exactly one <mask> token
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
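# Each returned tuple is (filled sentence, probability, predicted token);
# topk=3 prints the three most likely completions for the masked position.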
| 41 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype
        )
        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype
            )
            blocks.append(conv2)
        self.blocks = blocks
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng):
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []
        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(
            output_channel, kernel_size=(1, 1), padding="VALID",
            kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout,
                    num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i], dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout,
                    num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype,
                )
            down_blocks.append(down_block)
            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID",
                    kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)
            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID",
                    kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)
        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection, dtype=self.dtype,
        )
        self.controlnet_mid_block = nn.Conv(
            mid_block_channel, kernel_size=(1, 1), padding="VALID",
            kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
        )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale=1.0,
        return_dict=True,
        train=False,
    ):
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)
        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)
        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples
        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
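# Rough usage sketch (random init, shapes only; a real pipeline would load pretrained params):
#
#   import jax
#   controlnet = FlaxControlNetModel()
#   params = controlnet.init_weights(jax.random.PRNGKey(0))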
| 84 | 0 |
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # Prune branches whose running sum overshoots, or that can no longer reach max_sum.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index]
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
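# Expected output for this input: [3, 4, 2] [4, 5]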
print(*result)
| 717 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 278 | 0 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face repository (legacy protocol)."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )
    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()
    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)
    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
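# Minimal usage sketch (assumes a DatasetInfo fetched via huggingface_hub):
#
#   from huggingface_hub import HfApi
#   fs = HfFileSystem(repo_info=HfApi().dataset_info("squad"))
#   print(fs.ls(""))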
| 379 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)
    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)
        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
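# The 1e-2 delta in the sum comparisons tolerates minor floating point differences between
# calling the extractors directly and calling them through the TvltProcessor wrapper.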
| 419 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
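# Usage sketch:
#   config = DecisionTransformerConfig(state_dim=11, act_dim=3)
#   config.max_position_embeddings == config.n_positions  # True, via attribute_map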
| 706 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="""%(message)s""")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row Numpy array into a column Numpy array."""
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any:
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
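# Both transforms expect features as an (n_features, n_samples) matrix (one column per sample)
# and return a projected array of shape (dimensions, n_samples).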
def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 218 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
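# Rough usage sketch (downloads the NLLB checkpoint on first use):
#   translator = TranslationTool()
#   translator("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English")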
| 612 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        _ = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    _, nn_ids = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in nn_ids[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
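# make_support returns the "question: ... context: ..." string fed to the seq2seq model,
# plus a list of (article_title, section_title, score, passage_text) tuples for display.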
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    # note: support_list is read from the module-level variable set in the UI flow below
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = "\n    ### Information retriever options\n\n    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n    "
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = "\n    ### Answer generation options\n\n    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n    **beam** search, or **sample** from the decoder's output probabilities.\n    "
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            question_doc, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            question_doc, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
disclaimer = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 612 | 1 |
'''simple docstring'''
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3317044064679887385961981 and not allow_probable:
raise ValueError(
'Warning: upper bound of deterministic test is exceeded. '
'Pass allow_probable=True to allow probabilistic test. '
'A return value of True indicates a probable prime.' )
# array bounds provided by analysis
    bounds = [
        2047,
        1373653,
        25326001,
        3215031751,
        2152302898747,
        3474749660383,
        341550071728321,
        1,
        3825123056546413051,
        1,
        1,
        318665857834031151167461,
        3317044064679887385961981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
if n < _p:
# then we have our last prime to check
            plist = primes[:idx]
break
    d, s = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def test_miller_rabin() -> None:
assert not miller_rabin(5_6_1 )
assert miller_rabin(5_6_3 )
# 2047
assert not miller_rabin(8_3_8_2_0_1 )
assert miller_rabin(8_3_8_2_0_7 )
# 1_373_653
assert not miller_rabin(1_7_3_1_6_0_0_1 )
assert miller_rabin(1_7_3_1_6_0_1_7 )
# 25_326_001
assert not miller_rabin(3_0_7_8_3_8_6_6_4_1 )
assert miller_rabin(3_0_7_8_3_8_6_6_5_3 )
# 3_215_031_751
assert not miller_rabin(1_7_1_3_0_4_5_5_7_4_8_0_1 )
assert miller_rabin(1_7_1_3_0_4_5_5_7_4_8_1_9 )
# 2_152_302_898_747
assert not miller_rabin(2_7_7_9_7_9_9_7_2_8_3_0_7 )
assert miller_rabin(2_7_7_9_7_9_9_7_2_8_3_2_7 )
# 3_474_749_660_383
assert not miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_4_4_1 )
assert miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_5_2_7 )
# 341_550_071_728_321
assert not miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_5_1 )
assert miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_9_1 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_8_6_7 )
assert miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_9_5_1 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_3_3 )
assert miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_5_9 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
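    # Quick sanity examples: 97 is prime, 91 = 7 * 13 is composite.
    assert miller_rabin(97)
    assert not miller_rabin(91)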
if __name__ == "__main__":
test_miller_rabin()
| 718 |
'''simple docstring'''
from __future__ import annotations
import requests
valid_terms = set(
"approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports".split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
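# The returned dict is keyed by post index; with wanted_data set, each value contains only
# the requested fields (e.g. title, url, selftext) for that post.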
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
| 471 | 0 |
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False,
               ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BICUBIC,
               data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float],
                data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]],
                  data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = ChannelDimension.FIRST , **lowerCamelCase_ , ) -> PIL.Image.Image:
_a : Dict = do_resize if do_resize is not None else self.do_resize
_a : str = size if size is not None else self.size
_a : List[str] = get_size_dict(_UpperCAmelCase )
_a : Tuple = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_a : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_a : int = resample if resample is not None else self.resample
_a : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
_a : int = rescale_factor if rescale_factor is not None else self.rescale_factor
_a : str = do_normalize if do_normalize is not None else self.do_normalize
_a : Dict = image_mean if image_mean is not None else self.image_mean
_a : Optional[int] = image_std if image_std is not None else self.image_std
_a : Dict = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
_a : str = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
_a : List[Any] = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_rescale:
_a : List[Any] = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_normalize:
_a : Union[str, Any] = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
_a : Any = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
_a : str = {'pixel_values': images}
return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> Dict:
_a : int = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(_UpperCAmelCase ):
_a : Any = target_sizes.numpy()
_a : Optional[int] = []
for idx in range(len(_UpperCAmelCase ) ):
_a : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=_UpperCAmelCase )
_a : Optional[int] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_UpperCAmelCase )
else:
_a : str = logits.argmax(dim=1 )
_a : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
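# A hypothetical end-to-end usage sketch for the processor above (the model,
# image variable, and call routing are placeholders; `__call__` is assumed to
# dispatch to `preprocess`, as Hugging Face image processors conventionally do):
#
#   processor = a()  # defaults: 384x384 resize, rescale by 1/255, ImageNet normalization
#   inputs = processor(images=pil_image, return_tensors="pt")
#   outputs = segmentation_model(**inputs)
#   maps = processor.post_process_semantic_segmentation(outputs, target_sizes=[pil_image.size[::-1]])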
| 120 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def UpperCamelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : Optional[int] , __magic_name__ : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionPipeline.from_pretrained(__magic_name__ , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
lowercase__ = load_file(__magic_name__ )
lowercase__ = []
# directly update weight in diffusers model
for key in state_dict:
        # It can be helpful to print each key; it usually looks like
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight".
        # Alpha is supplied as a script argument, so stored ".alpha" entries (and already-visited keys) are skipped.
if ".alpha" in key or key in visited:
continue
if "text" in key:
lowercase__ = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" )
lowercase__ = pipeline.text_encoder
else:
lowercase__ = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" )
lowercase__ = pipeline.unet
# find the target layer
lowercase__ = layer_infos.pop(0 )
while len(__magic_name__ ) > -1:
try:
lowercase__ = curr_layer.__getattr__(__magic_name__ )
if len(__magic_name__ ) > 0:
lowercase__ = layer_infos.pop(0 )
elif len(__magic_name__ ) == 0:
break
except Exception:
if len(__magic_name__ ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
lowercase__ = layer_infos.pop(0 )
lowercase__ = []
if "lora_down" in key:
pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) )
pair_keys.append(__magic_name__ )
else:
pair_keys.append(__magic_name__ )
pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
lowercase__ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
lowercase__ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(__magic_name__ , __magic_name__ ).unsqueeze(2 ).unsqueeze(3 )
else:
lowercase__ = state_dict[pair_keys[0]].to(torch.floataa )
lowercase__ = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(__magic_name__ , __magic_name__ )
# update visited list
for item in pair_keys:
visited.append(__magic_name__ )
return pipeline
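# The merge rule applied above is the standard LoRA update
# W <- W + alpha * (lora_up @ lora_down). A minimal, self-contained
# illustration (shapes and alpha are arbitrary for the sketch):
#
#   import torch
#   out_dim, in_dim, rank, alpha = 8, 8, 2, 0.75
#   weight = torch.zeros(out_dim, in_dim)
#   lora_up = torch.randn(out_dim, rank)
#   lora_down = torch.randn(rank, in_dim)
#   weight += alpha * torch.mm(lora_up, lora_down)  # same rule as the 2D branch above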
if __name__ == "__main__":
A : int = argparse.ArgumentParser()
parser.add_argument(
'--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.'
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors'
)
parser.add_argument(
'--lora_prefix_text_encoder',
default='lora_te',
type=str,
help='The prefix of text encoder weight in safetensors',
)
parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW')
parser.add_argument(
'--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.'
)
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
A : str = parser.parse_args()
A : Tuple = args.base_model_path
A : List[str] = args.checkpoint_path
A : Optional[int] = args.dump_path
A : Optional[int] = args.lora_prefix_unet
A : Any = args.lora_prefix_text_encoder
A : Any = args.alpha
A : List[str] = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
A : int = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
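# Example invocation (the script filename and all paths are placeholders):
#   python convert_lora_safetensor_to_diffusers.py \
#       --base_model_path runwayml/stable-diffusion-v1-5 \
#       --checkpoint_path ./lora_weights.safetensors \
#       --dump_path ./merged_pipeline \
#       --alpha 0.75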
| 15 | 0 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
__a = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : List[Any] , snake_case__ : str , snake_case__ : Dict=7_68 ):
"""simple docstring"""
super().__init__(snake_case__ )
A =proj_size
A =CLIPVisionModel(snake_case__ )
A =PaintByExampleMapper(snake_case__ )
A =nn.LayerNorm(config.hidden_size )
A =nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
A =nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def _a ( self : Any , snake_case__ : List[str] , snake_case__ : int=False ):
"""simple docstring"""
A =self.model(pixel_values=snake_case__ )
A =clip_output.pooler_output
A =self.mapper(latent_states[:, None] )
A =self.final_layer_norm(snake_case__ )
A =self.proj_out(snake_case__ )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class UpperCamelCase__( nn.Module ):
"""simple docstring"""
def __init__( self : Any , snake_case__ : Tuple ):
"""simple docstring"""
super().__init__()
A =(config.num_hidden_layers + 1) // 5
A =config.hidden_size
A =1
A =nn.ModuleList(
[
BasicTransformerBlock(snake_case__ , snake_case__ , snake_case__ , activation_fn="gelu" , attention_bias=snake_case__ )
for _ in range(snake_case__ )
] )
def _a ( self : int , snake_case__ : Tuple ):
"""simple docstring"""
for block in self.blocks:
A =block(snake_case__ )
return hidden_states
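# Shape sketch for the mapper above (dimensions illustrative): the image
# encoder passes CLIP pooled states of shape (batch, 1, hidden_size) through
# (num_hidden_layers + 1) // 5 BasicTransformerBlocks; each block preserves
# that shape, so the output feeds directly into the final layer norm and the
# hidden_size -> proj_size projection.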
| 689 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCamelCase_ ( a_ ) ->Tuple:
A =FileLock(str(tmpdir / "foo.lock" ) )
A =FileLock(str(tmpdir / "foo.lock" ) )
A =0.01
with locka.acquire():
with pytest.raises(a_ ):
A =time.time()
locka.acquire(a_ )
assert time.time() - _start > timeout
def UpperCamelCase_ ( a_ ) ->List[Any]:
A ="a" * 1000 + ".lock"
A =FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(".lock" )
assert not locka._lock_file.endswith(a_ )
assert len(os.path.basename(locka._lock_file ) ) <= 255
A =FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(a_ ):
locka.acquire(0 )
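# Minimal usage sketch of the FileLock behavior exercised above (the lock path
# is a placeholder): a second FileLock on the same file blocks until its
# timeout elapses and then raises Timeout.
#
#   lock_a = FileLock("/tmp/example.lock")
#   lock_b = FileLock("/tmp/example.lock", timeout=0.01)
#   with lock_a.acquire():
#       lock_b.acquire()  # raises Timeout after ~0.01s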
| 689 | 1 |
from __future__ import annotations
def __UpperCamelCase ( lowerCAmelCase__ : list[int] , lowerCAmelCase__ : int ):
__a : list[list[int]] = []
__a : list[int] = []
__a : List[str] = 0
__a : Any = sum(SCREAMING_SNAKE_CASE__ )
create_state_space_tree(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return result
def __UpperCamelCase ( lowerCAmelCase__ : list[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[list[int]] , lowerCAmelCase__ : int , ):
if sum(SCREAMING_SNAKE_CASE__ ) > max_sum or (remaining_nums_sum + sum(SCREAMING_SNAKE_CASE__ )) < max_sum:
return
if sum(SCREAMING_SNAKE_CASE__ ) == max_sum:
result.append(SCREAMING_SNAKE_CASE__ )
return
for index in range(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ):
create_state_space_tree(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index + 1 , [*path, nums[index]] , SCREAMING_SNAKE_CASE__ , remaining_nums_sum - nums[index] , )
lowercase__ =[3, 34, 4, 12, 5, 2]
lowercase__ =9
lowercase__ =generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
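# For the inputs above (nums = [3, 34, 4, 12, 5, 2], max_sum = 9), the only
# subsets summing to 9 are [3, 4, 2] and [4, 5], so the script is expected to
# print: [3, 4, 2] [4, 5]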
| 521 |
from __future__ import annotations
def _A ( SCREAMING_SNAKE_CASE__ : tuple[int, int] , SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase , UpperCamelCase :List[Any] = position
UpperCamelCase :Any = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
UpperCamelCase :Dict = []
for position in positions:
UpperCamelCase , UpperCamelCase :str = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(SCREAMING_SNAKE_CASE__ )
return permissible_positions
def _A ( SCREAMING_SNAKE_CASE__ : list[list[int]] ):
return not any(elem == 0 for row in board for elem in row )
def _A ( SCREAMING_SNAKE_CASE__ : list[list[int]] , SCREAMING_SNAKE_CASE__ : tuple[int, int] , SCREAMING_SNAKE_CASE__ : int ):
if is_complete(SCREAMING_SNAKE_CASE__ ):
return True
for position in get_valid_pos(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ):
UpperCamelCase , UpperCamelCase :Optional[int] = position
if board[y][x] == 0:
UpperCamelCase :Any = curr + 1
if open_knight_tour_helper(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , curr + 1 ):
return True
UpperCamelCase :Union[str, Any] = 0
return False
def _A ( SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase :List[Any] = [[0 for i in range(SCREAMING_SNAKE_CASE__ )] for j in range(SCREAMING_SNAKE_CASE__ )]
for i in range(SCREAMING_SNAKE_CASE__ ):
for j in range(SCREAMING_SNAKE_CASE__ ):
UpperCamelCase :Tuple = 1
if open_knight_tour_helper(SCREAMING_SNAKE_CASE__ , (i, j) , 1 ):
return board
UpperCamelCase :str = 0
    UpperCamelCase :List[Any] = F'''Open Knight Tour cannot be performed on a board of size {n}'''
raise ValueError(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
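# Usage sketch for the solver above: open_knight_tour(5) returns a 5x5 board
# whose entries record the visiting order 1..25 of an open knight's tour,
# while a size with no tour (e.g. open_knight_tour(2)) raises the ValueError
# constructed above.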
| 658 | 0 |
def lowerCamelCase_ ( A : Optional[Any] = 50 ):
"""simple docstring"""
lowerCAmelCase_ = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
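# Sanity check under the Project Euler 114 counting rules this implements
# (red blocks of length >= 3, separated by at least one grey square): a row
# of length 7 admits exactly 17 arrangements, so solution(7) is expected to
# return 17.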
if __name__ == "__main__":
print(f'''{solution() = }''')
| 702 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase = 13 , _UpperCAmelCase = 64 , _UpperCAmelCase = 2 , _UpperCAmelCase = 3 , _UpperCAmelCase = 3 , _UpperCAmelCase = True , _UpperCAmelCase = True , _UpperCAmelCase = 128 , _UpperCAmelCase=[16, 32, 64, 128] , _UpperCAmelCase = 7 , _UpperCAmelCase = 4 , _UpperCAmelCase = 37 , _UpperCAmelCase = "gelu" , _UpperCAmelCase = 0.1 , _UpperCAmelCase = 0.1 , _UpperCAmelCase = 10 , _UpperCAmelCase = 0.02 , _UpperCAmelCase = 2 , _UpperCAmelCase = 1 , _UpperCAmelCase = 128 , _UpperCAmelCase = [2, 2, 2, 2] , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , ):
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = image_size
lowerCAmelCase_ = patch_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = type_sequence_label_size
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = encoder_stride
lowerCAmelCase_ = num_attention_outputs
lowerCAmelCase_ = embed_dim
lowerCAmelCase_ = embed_dim + 1
lowerCAmelCase_ = resolution
lowerCAmelCase_ = depths
lowerCAmelCase_ = hidden_sizes
lowerCAmelCase_ = dim
lowerCAmelCase_ = mlp_expansion_ratio
def lowercase__ ( self):
lowerCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowerCAmelCase_ = None
if self.use_labels:
lowerCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowerCAmelCase_ = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self):
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
lowerCAmelCase_ = TFEfficientFormerModel(config=_UpperCAmelCase)
lowerCAmelCase_ = model(_UpperCAmelCase , training=_UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
lowerCAmelCase_ = self.type_sequence_label_size
lowerCAmelCase_ = TFEfficientFormerForImageClassification(_UpperCAmelCase)
lowerCAmelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase , training=_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
lowerCAmelCase_ = 1
lowerCAmelCase_ = TFEfficientFormerForImageClassification(_UpperCAmelCase)
lowerCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
lowerCAmelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def lowercase__ ( self):
lowerCAmelCase_ = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = config_and_inputs
lowerCAmelCase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase_ ( A , A , unittest.TestCase ):
'''simple docstring'''
a :Union[str, Any] = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
a :Optional[int] = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
a :int = False
a :int = False
a :Optional[Any] = False
a :Dict = False
a :Optional[Any] = False
def lowercase__ ( self):
lowerCAmelCase_ = TFEfficientFormerModelTester(self)
lowerCAmelCase_ = ConfigTester(
self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37)
def lowercase__ ( self):
self.config_tester.run_common_tests()
@unittest.skip(reason='''EfficientFormer does not use inputs_embeds''')
def lowercase__ ( self):
pass
@unittest.skip(reason='''EfficientFormer does not support input and output embeddings''')
def lowercase__ ( self):
pass
def lowercase__ ( self):
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = model_class(_UpperCAmelCase)
lowerCAmelCase_ = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ = [*signature.parameters.keys()]
lowerCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase)
def lowercase__ ( self):
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
lowerCAmelCase_ = model_class(_UpperCAmelCase)
lowerCAmelCase_ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase) , training=_UpperCAmelCase)
lowerCAmelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase_ = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
if hasattr(self.model_tester , '''encoder_seq_length'''):
lowerCAmelCase_ = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , '''chunk_length''') and self.model_tester.chunk_length > 1:
lowerCAmelCase_ = seq_length * self.model_tester.chunk_length
else:
lowerCAmelCase_ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
lowerCAmelCase_ = outputs.decoder_hidden_states
                self.assertIsInstance(_UpperCAmelCase , (list, tuple))
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
lowerCAmelCase_ = getattr(self.model_tester , '''seq_length''' , _UpperCAmelCase)
lowerCAmelCase_ = getattr(self.model_tester , '''decoder_seq_length''' , _UpperCAmelCase)
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , )
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False):
lowerCAmelCase_ = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase)
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowercase__ ( self):
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
@unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''')
def lowercase__ ( self):
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase)
def lowercase__ ( self):
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase)
@slow
def lowercase__ ( self):
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ = TFEfficientFormerModel.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
def lowercase__ ( self):
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = True
lowerCAmelCase_ = getattr(self.model_tester , '''seq_length''' , _UpperCAmelCase)
lowerCAmelCase_ = getattr(self.model_tester , '''encoder_seq_length''' , _UpperCAmelCase)
lowerCAmelCase_ = getattr(self.model_tester , '''key_length''' , _UpperCAmelCase)
lowerCAmelCase_ = getattr(self.model_tester , '''chunk_length''' , _UpperCAmelCase)
if chunk_length is not None and hasattr(self.model_tester , '''num_hashes'''):
lowerCAmelCase_ = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
lowerCAmelCase_ = True
lowerCAmelCase_ = False
lowerCAmelCase_ = True
lowerCAmelCase_ = model_class(_UpperCAmelCase)
lowerCAmelCase_ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase) , training=_UpperCAmelCase)
lowerCAmelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_attention_outputs)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase_ = True
lowerCAmelCase_ = model_class(_UpperCAmelCase)
lowerCAmelCase_ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase) , training=_UpperCAmelCase)
lowerCAmelCase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_attention_outputs)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def lowercase__ ( self):
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
lowerCAmelCase_ = model_class(_UpperCAmelCase)
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
lowerCAmelCase_ = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_UpperCAmelCase)
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
lowerCAmelCase_ = model(_UpperCAmelCase)
self.assertTrue(outputs_dict is not None)
def lowerCamelCase_ ( ):
"""simple docstring"""
lowerCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase__ ( self):
return (
EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''')
if is_vision_available()
else None
)
@slow
def lowercase__ ( self):
lowerCAmelCase_ = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''')
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors='''tf''')
# forward pass
lowerCAmelCase_ = model(**_UpperCAmelCase , training=_UpperCAmelCase)
# verify the logits
lowerCAmelCase_ = tf.TensorShape((1, 1_000))
self.assertEqual(outputs.logits.shape , _UpperCAmelCase)
lowerCAmelCase_ = tf.constant([-0.0555, 0.4825, -0.0852])
self.assertTrue(np.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4))
@slow
def lowercase__ ( self):
lowerCAmelCase_ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'''snap-research/efficientformer-l1-300''')
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors='''tf''')
# forward pass
lowerCAmelCase_ = model(**_UpperCAmelCase , training=_UpperCAmelCase)
# verify the logits
lowerCAmelCase_ = tf.TensorShape((1, 1_000))
self.assertEqual(outputs.logits.shape , _UpperCAmelCase)
lowerCAmelCase_ = tf.constant([-0.1312, 0.4353, -1.0499])
self.assertTrue(np.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4))
| 413 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
A__ : Optional[Any] = {
'''configuration_mobilenet_v2''': [
'''MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileNetV2Config''',
'''MobileNetV2OnnxConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[Any] = ['''MobileNetV2FeatureExtractor''']
A__ : List[Any] = ['''MobileNetV2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any] = [
'''MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileNetV2ForImageClassification''',
'''MobileNetV2ForSemanticSegmentation''',
'''MobileNetV2Model''',
'''MobileNetV2PreTrainedModel''',
'''load_tf_weights_in_mobilenet_v2''',
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
A__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 171 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
A__ : Union[str, Any] = 16
A__ : int = 32
def UpperCamelCase( __UpperCamelCase : Tuple ):
return int(x / 2**20 )
class __snake_case :
def __enter__( self : str):
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
lowerCAmelCase_ : List[str] = torch.cuda.memory_allocated()
return self
def __exit__( self : Any , *A_ : Dict):
gc.collect()
torch.cuda.empty_cache()
lowerCAmelCase_ : str = torch.cuda.memory_allocated()
lowerCAmelCase_ : Optional[int] = torch.cuda.max_memory_allocated()
lowerCAmelCase_ : List[str] = bamb(self.end - self.begin)
lowerCAmelCase_ : Optional[int] = bamb(self.peak - self.begin)
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def UpperCamelCase( __UpperCamelCase : Accelerator ,__UpperCamelCase : int = 16 ,__UpperCamelCase : str = "bert-base-cased" ,__UpperCamelCase : int = 320 ,__UpperCamelCase : int = 160 ,):
lowerCAmelCase_ : Dict = AutoTokenizer.from_pretrained(__UpperCamelCase )
lowerCAmelCase_ : Any = load_dataset(
'''glue''' ,'''mrpc''' ,split={'''train''': f"""train[:{n_train}]""", '''validation''': f"""validation[:{n_val}]"""} )
def tokenize_function(__UpperCamelCase : Any ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ : Union[str, Any] = tokenizer(examples['''sentence1'''] ,examples['''sentence2'''] ,truncation=__UpperCamelCase ,max_length=__UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase_ : Union[str, Any] = datasets.map(
__UpperCamelCase ,batched=__UpperCamelCase ,remove_columns=['''idx''', '''sentence1''', '''sentence2'''] ,load_from_cache_file=__UpperCamelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ : List[str] = tokenized_datasets.rename_column('''label''' ,'''labels''' )
def collate_fn(__UpperCamelCase : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__UpperCamelCase ,padding='''max_length''' ,max_length=128 ,return_tensors='''pt''' )
return tokenizer.pad(__UpperCamelCase ,padding='''longest''' ,return_tensors='''pt''' )
# Instantiate dataloaders.
lowerCAmelCase_ : Union[str, Any] = DataLoader(
tokenized_datasets['''train'''] ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=__UpperCamelCase )
lowerCAmelCase_ : str = DataLoader(
tokenized_datasets['''validation'''] ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=__UpperCamelCase )
return train_dataloader, eval_dataloader
def UpperCamelCase( __UpperCamelCase : Any ,__UpperCamelCase : Tuple ):
# Initialize accelerator
lowerCAmelCase_ : Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ : Any = config['''lr''']
lowerCAmelCase_ : Any = int(config['''num_epochs'''] )
lowerCAmelCase_ : Any = int(config['''seed'''] )
lowerCAmelCase_ : Dict = int(config['''batch_size'''] )
lowerCAmelCase_ : Dict = args.model_name_or_path
set_seed(__UpperCamelCase )
lowerCAmelCase_ , lowerCAmelCase_ : Dict = get_dataloaders(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,args.n_train ,args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ : Any = AutoModelForSequenceClassification.from_pretrained(__UpperCamelCase ,return_dict=__UpperCamelCase )
# Instantiate optimizer
lowerCAmelCase_ : Any = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCAmelCase_ : List[str] = optimizer_cls(params=model.parameters() ,lr=__UpperCamelCase )
if accelerator.state.deepspeed_plugin is not None:
lowerCAmelCase_ : str = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
lowerCAmelCase_ : Tuple = 1
lowerCAmelCase_ : str = (len(__UpperCamelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCAmelCase_ : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=__UpperCamelCase ,num_warmup_steps=0 ,num_training_steps=__UpperCamelCase ,)
else:
lowerCAmelCase_ : List[Any] = DummyScheduler(__UpperCamelCase ,total_num_steps=__UpperCamelCase ,warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = accelerator.prepare(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# We need to keep track of how many total steps we have iterated over
lowerCAmelCase_ : str = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCAmelCase_ : List[Any] = 0
# Now we train the model
lowerCAmelCase_ : Union[str, Any] = {}
for epoch in range(__UpperCamelCase ,__UpperCamelCase ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(__UpperCamelCase ):
lowerCAmelCase_ : Union[str, Any] = model(**__UpperCamelCase )
lowerCAmelCase_ : Any = outputs.loss
lowerCAmelCase_ : List[str] = loss / gradient_accumulation_steps
accelerator.backward(__UpperCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('''Memory before entering the train : {}'''.format(bamb(tracemalloc.begin ) ) )
accelerator.print('''Memory consumed at the end of the train (end-begin): {}'''.format(tracemalloc.used ) )
accelerator.print('''Peak Memory consumed during the train (max-begin): {}'''.format(tracemalloc.peaked ) )
accelerator.print(
'''Total Peak Memory consumed during the train (max): {}'''.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
lowerCAmelCase_ : Tuple = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir ,'''peak_memory_utilization.json''' ) ,'''w''' ) as f:
json.dump(__UpperCamelCase ,__UpperCamelCase )
def UpperCamelCase( ):
lowerCAmelCase_ : str = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' ,type=__UpperCamelCase ,default='''bert-base-cased''' ,help='''Path to pretrained model or model identifier from huggingface.co/models.''' ,required=__UpperCamelCase ,)
parser.add_argument(
'''--output_dir''' ,type=__UpperCamelCase ,default='''.''' ,help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' ,)
parser.add_argument(
'''--peak_memory_upper_bound''' ,type=__UpperCamelCase ,default=__UpperCamelCase ,help='''The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.''' ,)
parser.add_argument(
'''--n_train''' ,type=__UpperCamelCase ,default=320 ,help='''Number of training examples to use.''' ,)
parser.add_argument(
'''--n_val''' ,type=__UpperCamelCase ,default=160 ,help='''Number of validation examples to use.''' ,)
parser.add_argument(
'''--num_epochs''' ,type=__UpperCamelCase ,default=1 ,help='''Number of train epochs.''' ,)
lowerCAmelCase_ : Dict = parser.parse_args()
lowerCAmelCase_ : int = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(__UpperCamelCase ,__UpperCamelCase )
if __name__ == "__main__":
main()
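# Example invocation (script filename and the memory bound are illustrative):
#   accelerate launch peak_memory_tracking.py \
#       --model_name_or_path bert-base-cased \
#       --n_train 320 --n_val 160 --num_epochs 1 \
#       --peak_memory_upper_bound 2000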
| 171 | 1 |
def lowerCamelCase_(lowerCamelCase_ ) -> Union[str, Any]:
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
UpperCAmelCase = 1
UpperCAmelCase = 1
while repunit:
UpperCAmelCase = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
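# Worked example for the helper above: 111111 (six ones) is the shortest
# repunit divisible by 7, since 111111 = 7 * 15873, so the intended result of
# least_divisible_repunit(7) is 6.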
def lowerCamelCase_(lowerCamelCase_ = 1_000_000 ) -> Any:
UpperCAmelCase = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(lowercase__ ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(F'''{solution() = }''')
| 719 |
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__lowerCamelCase : Optional[int] = None
__lowerCamelCase : int = logging.get_logger(__name__)
__lowerCamelCase : Any = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
__lowerCamelCase : List[str] = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
},
"tokenizer_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
},
}
# TODO(PVP) - this should be removed in Transformers v5
__lowerCamelCase : List[str] = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
class __magic_name__ ( A__ ):
lowercase : List[str] =VOCAB_FILES_NAMES
lowercase : int =PRETRAINED_VOCAB_FILES_MAP
lowercase : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : Union[str, Any] =['''input_ids''', '''attention_mask''']
lowercase : Any =TaTokenizer
lowercase : List[int] =[]
def __init__( self : Optional[Any] , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Any="</s>" , UpperCamelCase__ : Dict="<unk>" , UpperCamelCase__ : Tuple="<pad>" , UpperCamelCase__ : Tuple=1_00 , UpperCamelCase__ : List[Any]=None , **UpperCamelCase__ : int , ) -> Optional[Any]:
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
UpperCAmelCase = [F'<extra_id_{i}>' for i in range(UpperCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
UpperCAmelCase = len(set(filter(lambda UpperCamelCase__ : bool("extra_id_" in str(UpperCamelCase__ ) ) , UpperCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , extra_ids=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
UpperCAmelCase = vocab_file
UpperCAmelCase = False if not self.vocab_file else True
UpperCAmelCase = extra_ids
@staticmethod
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
UpperCAmelCase = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
F' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
F' {pretrained_model_name_or_path} automatically truncating your input to'
F' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
F' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , UpperCamelCase__ , )
return max_model_length
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCAmelCase = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
copyfile(self.vocab_file , UpperCamelCase__ )
logger.info(F'Copy vocab file to {out_vocab_file}' )
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
UpperCAmelCase = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
UpperCAmelCase = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
UpperCAmelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
return list(
set(filter(lambda UpperCamelCase__ : bool(re.search(R"<extra_id_\d+>" , UpperCamelCase__ ) ) is not None , self.additional_special_tokens ) ) )
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Dict:
'''simple docstring'''
return [self.convert_tokens_to_ids(UpperCamelCase__ ) for token in self.get_sentinel_tokens()]
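    # A hypothetical usage sketch of the sentinel helpers above (the model name
    # is illustrative; upstream this second helper is called
    # `get_sentinel_token_ids`):
    #
    #   tok = TaTokenizerFast.from_pretrained("t5-small")
    #   tok.get_sentinel_tokens()  # e.g. ["<extra_id_0>", ..., "<extra_id_99>"] (order not guaranteed)
    #   [tok.convert_tokens_to_ids(t) for t in tok.get_sentinel_tokens()]  # matching vocabulary ids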
| 457 | 0 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : List[Any] = XLMTokenizer
__a : int = False
def snake_case ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE_ : Any = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
SCREAMING_SNAKE_CASE_ : str = dict(zip(snake_case__ ,range(len(snake_case__ ) ) ) )
SCREAMING_SNAKE_CASE_ : Optional[int] = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
SCREAMING_SNAKE_CASE_ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ) as fp:
fp.write(json.dumps(snake_case__ ) )
with open(self.merges_file ,'w' ) as fp:
fp.write('\n'.join(snake_case__ ) )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Dict = 'lower newer'
return input_text, output_text
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = XLMTokenizer(self.vocab_file ,self.merges_file )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'lower'
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['low', 'er</w>']
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokens + ['<unk>']
SCREAMING_SNAKE_CASE_ : List[Any] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) ,snake_case__ )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = XLMTokenizer.from_pretrained('xlm-mlm-en-2048' )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.encode('sequence builders' ,add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.encode('multi-sequence build' ,add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.build_inputs_with_special_tokens(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.build_inputs_with_special_tokens(snake_case__ ,snake_case__ )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
| 105 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_:Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:int = """Hello, World!"""
SCREAMING_SNAKE_CASE_:List[Any] = """en_XX"""
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any:
"""simple docstring"""
A : Optional[int] = Path("""data_bin""" )
A : Optional[Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowerCAmelCase ).parent ) , checkpoint_file=Path(_lowerCAmelCase ).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(_lowerCAmelCase ) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(_lowerCAmelCase ).parent / """sentencepiece.bpe.model""" ) , src_dict=str(data_dir / """dict.txt""" ) , )
xmod.eval() # disable dropout
print(_lowerCAmelCase )
A : Any = xmod.model.encoder.sentence_encoder
A : Optional[int] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our X-MOD config:""" , _lowerCAmelCase )
A : int = XmodForSequenceClassification(_lowerCAmelCase ) if classification_head else XmodForMaskedLM(_lowerCAmelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
A : Any = xmod_sent_encoder.embed_tokens.weight
A : int = xmod_sent_encoder.embed_positions.weight
A : str = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
A : Dict = xmod_sent_encoder.layernorm_embedding.weight
A : int = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
A : str = model.roberta.encoder.layer[i]
A : Tuple = xmod_sent_encoder.layers[i]
# self attention
A : Optional[int] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
A : List[str] = xmod_layer.self_attn.q_proj.weight
A : Optional[int] = xmod_layer.self_attn.q_proj.bias
A : List[Any] = xmod_layer.self_attn.k_proj.weight
A : Union[str, Any] = xmod_layer.self_attn.k_proj.bias
A : Optional[int] = xmod_layer.self_attn.v_proj.weight
A : Dict = xmod_layer.self_attn.v_proj.bias
# self-attention output
A : Optional[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
A : Optional[Any] = xmod_layer.self_attn.out_proj.weight
A : Dict = xmod_layer.self_attn.out_proj.bias
A : Union[str, Any] = xmod_layer.self_attn_layer_norm.weight
A : str = xmod_layer.self_attn_layer_norm.bias
# intermediate
A : str = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of intermediate weights do not match.""" )
A : Optional[int] = xmod_layer.fca.weight
A : Optional[int] = xmod_layer.fca.bias
# output
A : Dict = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of feed-forward weights do not match.""" )
A : Union[str, Any] = xmod_layer.fca.weight
A : int = xmod_layer.fca.bias
A : List[str] = xmod_layer.final_layer_norm.weight
A : Optional[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
A : str = xmod_layer.adapter_layer_norm.weight
A : str = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
for lang_code, adapter in xmod_layer.adapter_modules.items():
A : Optional[int] = bert_output.adapter_modules[lang_code]
A : int = xmod_layer.adapter_modules[lang_code]
A : Optional[Any] = from_adapter.fca.weight
A : Optional[Any] = from_adapter.fca.bias
A : List[str] = from_adapter.fca.weight
A : Any = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
A : Dict = xmod_sent_encoder.layer_norm.weight
A : int = xmod_sent_encoder.layer_norm.bias
if classification_head:
A : int = xmod.model.classification_heads["""mnli"""].dense.weight
A : Optional[Any] = xmod.model.classification_heads["""mnli"""].dense.bias
A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight
A : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
A : Any = xmod.model.encoder.lm_head.dense.weight
A : Tuple = xmod.model.encoder.lm_head.dense.bias
A : Any = xmod.model.encoder.lm_head.layer_norm.weight
A : List[str] = xmod.model.encoder.lm_head.layer_norm.bias
A : Union[str, Any] = xmod.model.encoder.lm_head.weight
A : Tuple = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
A : Optional[int] = xmod.encode(_lowerCAmelCase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_lowerCAmelCase )
A : List[str] = model(_lowerCAmelCase )[0]
if classification_head:
A : Dict = xmod.model.classification_heads["""mnli"""](xmod.extract_features(_lowerCAmelCase ) )
else:
A : Optional[Any] = xmod.model(_lowerCAmelCase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
A : str = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
A : Optional[Any] = torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
Path(_lowerCAmelCase ).mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
SCREAMING_SNAKE_CASE_:Optional[Any] = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
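# Example invocation (script filename and paths are placeholders):
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path /path/to/xmod/model.pt \
#       --pytorch_dump_folder_path ./xmod-base \
#       [--classification_head]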
| 662 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_UpperCamelCase : str = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Optional[int] = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : List[Any] = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
_UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 713 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_UpperCamelCase : int = logging.get_logger(__name__)
_UpperCamelCase : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_UpperCamelCase : Any = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
_UpperCamelCase : int = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
_UpperCamelCase : Union[str, Any] = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2,
'facebook/dpr-ctx_encoder-multiset-base': 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 5_1_2,
'facebook/dpr-question_encoder-multiset-base': 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 5_1_2,
'facebook/dpr-reader-multiset-base': 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer( BertTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer( BertTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = R'\n    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n    with the format:\n\n    ```\n    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n    ```\n\n    Args:\n        questions (`str` or `List[str]`):\n            The questions to be encoded. You can specify one question for many passages. In this case, the question\n            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n            `titles` or `texts`.\n        titles (`str` or `List[str]`):\n            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n        texts (`str` or `List[str]`):\n            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n            Activates and controls padding. Accepts the following values:\n\n            - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n              if provided).\n            - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided.\n            - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n              lengths).\n        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n            Activates and controls truncation. Accepts the following values:\n\n            - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n              the maximum acceptable input length for the model if that argument is not provided. This will truncate\n              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n              of pairs) is provided.\n            - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the first\n              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the\n              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n              greater than the model maximum admissible input size).\n        max_length (`int`, *optional*):\n            Controls the maximum length to use by one of the truncation/padding parameters.\n\n            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n            is required by one of the truncation/padding parameters. If the model has no specific maximum input\n            length (like XLNet) truncation/padding to a maximum length will be deactivated.\n        return_tensors (`str` or [`~utils.TensorType`], *optional*):\n            If set, will return tensors instead of list of python integers. Acceptable values are:\n\n            - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n            - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n            - `\'np\'`: Return Numpy `np.ndarray` objects.\n        return_attention_mask (`bool`, *optional*):\n            Whether or not to return the attention mask. If not set, will return the attention mask according to the\n            specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n    Returns:\n        `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n        - `input_ids`: List of token ids to be fed to a model.\n        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n    '
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin:
    def __call__( self , questions , titles = None , texts = None , padding = False , truncation = False , max_length = None , return_tensors = None , return_attention_mask = None , **kwargs , ):
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str ) else [titles]
        texts = texts if not isinstance(texts , str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions , str ) else [questions] * n_passages
        if len(titles ) != len(texts ):
            raise ValueError(
                F'There should be as many titles as texts but got {len(titles )} titles and {len(texts )} texts.' )
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )['input_ids']
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )['input_ids']
        encoded_inputs = {
            'input_ids': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs['attention_mask'] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
    def decode_best_spans( self , reader_input , reader_output , num_spans = 16 , max_answer_length = 64 , num_spans_per_passage = 4 , ):
        input_ids = reader_input['input_ids']
        start_logits , end_logits , relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits )
        sorted_docs = sorted(range(n_passages ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
            if len(nbest_spans_predictions ) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self , start_logits , end_logits , max_answer_length , top_spans , ):
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores , key=lambda x : x[1] , reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(F'Wrong span indices: [{start_index}:{end_index}]' )
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(F'Span is too long: {length} > {max_answer_length}' )
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class DPRReaderTokenizer( CustomDPRReaderTokenizerMixin , BertTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
| 134 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components( self ):
        return self._get_dummy_components()

    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )

    def test_save_load_optional_components( self ):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
    def test_save_load_floataa( self ):
        super().test_save_load_floataa(expected_max_diff=1e-1 )

    def test_attention_slicing_forward_pass( self ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )

    def test_save_load_local( self ):
        self._test_save_load_local()

    def test_inference_batch_single_identical( self ):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2 )
| 271 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester :
'''simple docstring'''
def __init__( self , A_ , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = 13
SCREAMING_SNAKE_CASE__ = 7
SCREAMING_SNAKE_CASE__ = 30
SCREAMING_SNAKE_CASE__ = self.seq_length + self.mem_len
SCREAMING_SNAKE_CASE__ = 15
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = 99
SCREAMING_SNAKE_CASE__ = [10, 50, 80]
SCREAMING_SNAKE_CASE__ = 32
SCREAMING_SNAKE_CASE__ = 32
SCREAMING_SNAKE_CASE__ = 4
SCREAMING_SNAKE_CASE__ = 8
SCREAMING_SNAKE_CASE__ = 1_28
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = self.vocab_size - 1
SCREAMING_SNAKE_CASE__ = 0.01
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def lowercase_ ( self ):
'''simple docstring'''
random.seed(self.seed )
tf.random.set_seed(self.seed )
def lowercase_ ( self , A_ , A_ , A_ , A_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = TFTransfoXLModel(A_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model(A_ ).to_tuple()
SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids_a, '''mems''': mems_a}
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model(A_ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def lowercase_ ( self , A_ , A_ , A_ , A_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = TFTransfoXLLMHeadModel(A_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model(A_ ).to_tuple()
SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids_a, '''labels''': lm_labels}
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model(A_ ).to_tuple()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model([input_ids_a, mems_a] ).to_tuple()
SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels}
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model(A_ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def lowercase_ ( self , A_ , A_ , A_ , A_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = TFTransfoXLForSequenceClassification(A_ )
SCREAMING_SNAKE_CASE__ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) = config_and_inputs
SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids_a}
return config, inputs_dict
@require_tf
class TFTransfoXLModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
lowerCamelCase__ : List[str] = () if is_tf_available() else ()
lowerCamelCase__ : List[Any] = (
{
"""feature-extraction""": TFTransfoXLModel,
"""text-classification""": TFTransfoXLForSequenceClassification,
"""text-generation""": TFTransfoXLLMHeadModel,
"""zero-shot""": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Optional[Any] = False
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = TFTransfoXLModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=A_ , d_embed=37 )
def lowercase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self ):
'''simple docstring'''
self.model_tester.set_seed()
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*A_ )
def lowercase_ ( self ):
'''simple docstring'''
self.model_tester.set_seed()
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*A_ )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*A_ )
    def test_model_common_attributes( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x , tf.keras.layers.Layer )
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
def lowercase_ ( self ):
'''simple docstring'''
pass
@slow
def lowercase_ ( self ):
'''simple docstring'''
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ = TFTransfoXLModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
def lowercase_ ( self ):
'''simple docstring'''
pass
@require_tf
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
SCREAMING_SNAKE_CASE__ = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
SCREAMING_SNAKE_CASE__ = model.generate(A_ , max_length=2_00 , do_sample=A_ )
self.assertListEqual(output_ids[0].numpy().tolist() , A_ )
| 100 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
lowercase__ = logging.get_logger(__name__)
class PoolFormerFeatureExtractor( PoolFormerImageProcessor ):
    def __init__(self , *args , **kwargs ):
        warnings.warn(
            """The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use PoolFormerImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 716 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig( PretrainedConfig ):
    model_type = "vit_msn"

    def __init__(self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-06 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 63 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv3ImageProcessor'
    tokenizer_class = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text=None , text_pair=None , boxes=None , word_labels=None , add_special_tokens=True , padding=False , truncation=None , max_length=None , stride=0 , pad_to_multiple_of=None , return_token_type_ids=None , return_attention_mask=None , return_overflowing_tokens=False , return_special_tokens_mask=False , return_offsets_mapping=False , return_length=False , verbose=True , return_tensors=None , **kwargs , ):
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"] , text_pair=text_pair , boxes=boxes if boxes is not None else features["boxes"] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop("pixel_values" )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs["overflow_to_sample_mapping"] )
        encoded_inputs["pixel_values"] = images
        return encoded_inputs
    def get_overflowing_images( self , images , overflow_to_sample_mapping ):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                F''' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}''' )
        return images_with_overflow

    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class( self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
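# --- Usage sketch (added for illustration; not part of the original file) ---
# Shows the OCR-driven path of the processor above. It assumes the
# `microsoft/layoutlmv3-base` checkpoint, network access, and a Tesseract
# installation (the default image processor runs OCR through pytesseract);
# `document.png` is a hypothetical scanned page.
if __name__ == "__main__":
    from PIL import Image
    from transformers import LayoutLMv3Processor

    processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base" )
    page = Image.open("document.png" ).convert("RGB" )
    encoding = processor(page , return_tensors="pt" )  # OCR supplies words and boxes
    print(list(encoding.keys() ) )  # input_ids, attention_mask, bbox, pixel_values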
| 530 | """simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
'processing_mgp_str': ['MgpstrProcessor'],
'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
'MgpstrModel',
'MgpstrPreTrainedModel',
'MgpstrForSceneTextRecognition',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 530 | 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
a__: str = get_tests_dir('fixtures')
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def UpperCamelCase ( self ):
# A mock response for an HTTP head request to emulate server down
A__ = mock.Mock()
A__ = 500
A__ = {}
A__ = HTTPError
A__ = {}
# Download this model to make sure it's in the cache.
A__ = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''',return_value=__lowerCamelCase ) as mock_head:
A__ = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCamelCase ( self ):
# This test is for deprecated behavior and can be removed in v5
A__ = ViTImageProcessor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )
def UpperCamelCase ( self ):
with self.assertRaises(__lowerCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
A__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )
A__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/stable-diffusion-all-variants''',subfolder='''feature_extractor''' )
self.assertIsNotNone(__lowerCamelCase )
@is_staging_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@classmethod
def UpperCamelCase ( cls ):
A__ = TOKEN
HfFolder.save_token(__lowerCamelCase )
@classmethod
def UpperCamelCase ( cls ):
try:
delete_repo(token=cls._token,repo_id='''test-image-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id='''valid_org/test-image-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id='''test-dynamic-image-processor''' )
except HTTPError:
pass
def UpperCamelCase ( self ):
A__ = ViTImageProcessor.from_pretrained(__lowerCamelCase )
image_processor.push_to_hub('''test-image-processor''',use_auth_token=self._token )
A__ = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__lowerCamelCase,getattr(__lowerCamelCase,__lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token,repo_id='''test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__lowerCamelCase,repo_id='''test-image-processor''',push_to_hub=__lowerCamelCase,use_auth_token=self._token )
A__ = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__lowerCamelCase,getattr(__lowerCamelCase,__lowerCamelCase ) )
def UpperCamelCase ( self ):
A__ = ViTImageProcessor.from_pretrained(__lowerCamelCase )
image_processor.push_to_hub('''valid_org/test-image-processor''',use_auth_token=self._token )
A__ = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__lowerCamelCase,getattr(__lowerCamelCase,__lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token,repo_id='''valid_org/test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__lowerCamelCase,repo_id='''valid_org/test-image-processor-org''',push_to_hub=__lowerCamelCase,use_auth_token=self._token )
A__ = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__lowerCamelCase,getattr(__lowerCamelCase,__lowerCamelCase ) )
def UpperCamelCase ( self ):
CustomImageProcessor.register_for_auto_class()
A__ = CustomImageProcessor.from_pretrained(__lowerCamelCase )
image_processor.push_to_hub('''test-dynamic-image-processor''',use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map,{'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''},)
A__ = AutoImageProcessor.from_pretrained(
f"{USER}/test-dynamic-image-processor",trust_remote_code=__lowerCamelCase )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__,'''CustomImageProcessor''' )
| 212 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a__: str = logging.get_logger(__name__)
def UpperCamelCase__( UpperCamelCase__ : Union[str, Any] )->Any:
A__ = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
A__ = [1_44, 1_92, 2_40]
A__ = [16, 32, 64, 96, 1_28, 1_60, 6_40]
elif "mobilevit_xs" in mobilevit_name:
A__ = [96, 1_20, 1_44]
A__ = [16, 32, 48, 64, 80, 96, 3_84]
elif "mobilevit_xxs" in mobilevit_name:
A__ = [64, 80, 96]
A__ = [16, 16, 24, 48, 64, 80, 3_20]
A__ = 0.05
A__ = 2.0
if mobilevit_name.startswith('''deeplabv3_''' ):
A__ = 5_12
A__ = 16
A__ = 21
A__ = '''pascal-voc-id2label.json'''
else:
A__ = 10_00
A__ = '''imagenet-1k-id2label.json'''
A__ = '''huggingface/label-files'''
A__ = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='''dataset''' ) , '''r''' ) )
A__ = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
return config
def UpperCamelCase__( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any]=False )->Optional[Any]:
for i in range(1 , 6 ):
if f"layer_{i}." in name:
A__ = name.replace(f"layer_{i}." , f"encoder.layer.{i - 1}." )
if "conv_1." in name:
A__ = name.replace('''conv_1.''' , '''conv_stem.''' )
if ".block." in name:
A__ = name.replace('''.block.''' , '''.''' )
if "exp_1x1" in name:
A__ = name.replace('''exp_1x1''' , '''expand_1x1''' )
if "red_1x1" in name:
A__ = name.replace('''red_1x1''' , '''reduce_1x1''' )
if ".local_rep.conv_3x3." in name:
A__ = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' )
if ".local_rep.conv_1x1." in name:
A__ = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' )
if ".norm." in name:
A__ = name.replace('''.norm.''' , '''.normalization.''' )
if ".conv." in name:
A__ = name.replace('''.conv.''' , '''.convolution.''' )
if ".conv_proj." in name:
A__ = name.replace('''.conv_proj.''' , '''.conv_projection.''' )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if f".{i}.{j}." in name:
A__ = name.replace(f".{i}.{j}." , f".{i}.layer.{j}." )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if f".{i}.{j}." in name:
A__ = name.replace(f".{i}.{j}." , f".{i}." )
if "expand_1x1" in name:
A__ = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' )
if "conv_3x3" in name:
A__ = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' )
if "reduce_1x1" in name:
A__ = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' )
for i in range(2 , 5 ):
if f".global_rep.{i}.weight" in name:
A__ = name.replace(f".global_rep.{i}.weight" , '''.layernorm.weight''' )
if f".global_rep.{i}.bias" in name:
A__ = name.replace(f".global_rep.{i}.bias" , '''.layernorm.bias''' )
if ".global_rep." in name:
A__ = name.replace('''.global_rep.''' , '''.transformer.''' )
if ".pre_norm_mha.0." in name:
A__ = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' )
if ".pre_norm_mha.1.out_proj." in name:
A__ = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' )
if ".pre_norm_ffn.0." in name:
A__ = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' )
if ".pre_norm_ffn.1." in name:
A__ = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' )
if ".pre_norm_ffn.4." in name:
A__ = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' )
if ".transformer." in name:
A__ = name.replace('''.transformer.''' , '''.transformer.layer.''' )
if ".aspp_layer." in name:
A__ = name.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in name:
A__ = name.replace('''.aspp_pool.''' , '''.''' )
if "seg_head." in name:
A__ = name.replace('''seg_head.''' , '''segmentation_head.''' )
if "segmentation_head.classifier.classifier." in name:
A__ = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' )
if "classifier.fc." in name:
A__ = name.replace('''classifier.fc.''' , '''classifier.''' )
elif (not base_model) and ("segmentation_head." not in name):
A__ = '''mobilevit.''' + name
return name
def UpperCamelCase__( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int=False )->Optional[int]:
if base_model:
A__ = ''''''
else:
A__ = '''mobilevit.'''
for key in orig_state_dict.copy().keys():
A__ = orig_state_dict.pop(UpperCamelCase__ )
if key[:8] == "encoder.":
A__ = key[8:]
if "qkv" in key:
A__ = key.split('''.''' )
A__ = int(key_split[0][6:] ) - 1
A__ = int(key_split[3] )
A__ = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}" )
A__ = layer.transformer.layer[transformer_num].attention.attention.all_head_size
A__ = (
f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
)
if "weight" in key:
A__ = val[:dim, :]
A__ = val[dim : dim * 2, :]
A__ = val[-dim:, :]
else:
A__ = val[:dim]
A__ = val[dim : dim * 2]
A__ = val[-dim:]
else:
A__ = val
return orig_state_dict
def UpperCamelCase__( )->List[str]:
A__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
A__ = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
return im
@torch.no_grad()
def UpperCamelCase__( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any]=False )->Dict:
A__ = get_mobilevit_config(UpperCamelCase__ )
# load original state_dict
A__ = torch.load(UpperCamelCase__ , map_location='''cpu''' )
# load 🤗 model
if mobilevit_name.startswith('''deeplabv3_''' ):
A__ = MobileViTForSemanticSegmentation(UpperCamelCase__ ).eval()
else:
A__ = MobileViTForImageClassification(UpperCamelCase__ ).eval()
A__ = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
A__ = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
A__ = image_processor(images=prepare_img() , return_tensors='''pt''' )
A__ = model(**UpperCamelCase__ )
A__ = outputs.logits
if mobilevit_name.startswith('''deeplabv3_''' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
A__ = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
A__ = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
A__ = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
] )
else:
raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}" )
assert torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-4 )
else:
assert logits.shape == (1, 10_00)
if mobilevit_name == "mobilevit_s":
A__ = torch.tensor([-0.9866, 0.2392, -1.1241] )
elif mobilevit_name == "mobilevit_xs":
A__ = torch.tensor([-2.4761, -0.9399, -1.9587] )
elif mobilevit_name == "mobilevit_xxs":
A__ = torch.tensor([-1.9364, -1.2327, -0.4653] )
else:
raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}" )
assert torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 )
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCamelCase__ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
A__ = {
'''mobilevit_s''': '''mobilevit-small''',
'''mobilevit_xs''': '''mobilevit-x-small''',
'''mobilevit_xxs''': '''mobilevit-xx-small''',
'''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
'''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
'''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
}
print('''Pushing to the hub...''' )
A__ = model_mapping[mobilevit_name]
image_processor.push_to_hub(UpperCamelCase__ , organization='''apple''' )
model.push_to_hub(UpperCamelCase__ , organization='''apple''' )
if __name__ == "__main__":
a__: str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--mobilevit_name',
default='mobilevit_s',
type=str,
help=(
'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','
' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'
),
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a__: Optional[int] = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
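# --- Illustrative sketch (added; not part of the original script) ---
# `convert_state_dict` above slices each fused qkv projection into separate
# query/key/value tensors. The helper below isolates that slicing pattern;
# its name and the example shapes are assumptions made for illustration.
def split_fused_qkv_example(qkv_weight, dim):
    """Split a (3 * dim, hidden) fused projection into (query, key, value)."""
    query = qkv_weight[:dim, :]  # first `dim` rows
    key = qkv_weight[dim : dim * 2, :]  # middle `dim` rows
    value = qkv_weight[-dim:, :]  # last `dim` rows
    return query, key, value
# e.g. split_fused_qkv_example(torch.zeros(3 * 64, 768), 64) returns three
# tensors of shape (64, 768).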
| 212 | 1 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        super().setUp()
        vocab_tokens = [
            '<d>',
            '</d>',
            '<s>',
            '</s>',
            '</_>',
            '<unk>',
            '<pad>',
            '</n>',
            '我',
            '是',
            'C',
            'P',
            'M',
            'A',
            'n',
            't',
        ]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    @tooslow
    def test_pre_tokenization( self ):
        tokenizer = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b' )
        texts = '今天天气真好!'
        jieba_tokens = ['今天', '天气', '真', '好', '!']
        tokens = tokenizer.tokenize(texts )
        self.assertListEqual(tokens , jieba_tokens )
        normalized_text = '今天天气真好!'
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_ids )
        reconstructed_text = tokenizer.decode(input_ids )
        self.assertEqual(reconstructed_text , normalized_text )
| 315 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    """Build a tiny dataset with two near-duplicate files and one distinct file."""
    data_dict = {
        'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
        'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
        'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
    }
    dataset = Dataset.from_dict(data_dict )
    return dataset


class MakeDuplicateClustersTest( TestCase ):
    def test_multiple_files( self ):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds , 0.85 )
        self.assertEqual(len(duplicate_clusters[0] ) , 2 )

    def test_deduplicate_dataset( self ):
        ds = get_dataset()
        ds_filter , duplicate_clusters = deduplicate_dataset(ds )
        self.assertEqual(len(ds_filter ) , 2 )
        print(duplicate_clusters )
        self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 )
        self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , True )
| 315 | 1 |
import numpy as np
def power_iteration( input_matrix , vector , error_tol = 1e-12 , max_iterations = 100 , ) -> tuple[float, np.ndarray]:
    """Find the dominant eigenvalue and corresponding eigenvector of `input_matrix`."""
    # Ensure the matrix is square.
    assert np.shape(input_matrix )[0] == np.shape(input_matrix )[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix )[0] == np.shape(vector )[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix ) == np.iscomplexobj(vector )
    is_complex = np.iscomplexobj(input_matrix )
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix , input_matrix.conj().T )
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix , vector )
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w )
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h , np.dot(input_matrix , vector ) )
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous ) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_ )
    return lambda_, vector


def test_power_iteration() -> None:
    """Compare power_iteration against numpy.linalg.eigh on real and complex inputs."""
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
    real_vector = np.array([41, 4, 20] )
    complex_input_matrix = real_input_matrix.astype(np.complex128 )
    imag_matrix = np.triu(1j * complex_input_matrix , 1 )
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20] ).astype(np.complex128 )
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix , vector )
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix )
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector ) - np.abs(eigen_vector_max ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
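# Illustrative usage (added as a sketch; not part of the original module): the
# dominant eigenpair of a small symmetric matrix, recovered with the
# power_iteration function defined above.
def example_power_iteration() -> None:
    matrix = np.array([[2.0, 1.0], [1.0, 2.0]] )  # eigenvalues are 3 and 1
    eigen_value, eigen_vector = power_iteration(matrix , np.array([1.0, 0.0] ) )
    assert abs(eigen_value - 3.0 ) <= 1e-6  # dominant eigenvalue
    # the matching eigenvector is proportional to [1, 1], so both components
    # have equal magnitude after normalization
    assert abs(abs(eigen_vector[0] ) - abs(eigen_vector[1] ) ) <= 1e-6


if __name__ == "__main__":
    example_power_iteration()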
| 704 |
def topological_sort( graph ):
    """Kahn's algorithm: repeatedly dequeue vertices whose indegree has dropped to zero."""
    indegree = [0] * len(graph )
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        cnt += 1
        topo.append(vertex )
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x )
    if cnt != len(graph ):
        print('Cycle exists' )
    else:
        print(topo )
# Adjacency List of Graph
lowercase_ = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
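# An extra illustrative call (added; not in the original): Kahn's algorithm
# doubles as a cycle detector, because every vertex on a cycle keeps a nonzero
# indegree and is never enqueued, so `cnt` stays below the vertex count.
cyclic_graph = {0: [1], 1: [2], 2: [0]}
topological_sort(cyclic_graph)  # prints "Cycle exists"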
| 380 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(
            num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
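
# Illustrative usage of the pipeline exercised above (not part of the test
# module; the checkpoint id is the one the slow test loads):
#
#     pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#     image = pipe(num_inference_steps=10, output_type="pil").images[0]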
| 347 |
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    # Slide the pattern over the text one character at a time.
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
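
# Complexity note (illustrative): the naive search above is O(n * m) in the
# worst case, e.g. searching for "aaab" in "aaaaaaaaaa" re-scans almost the
# whole pattern at every offset. Linear-time alternatives such as
# Knuth-Morris-Pratt avoid this by reusing information from partial matches.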
| 171 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    """Configuration class to store the configuration of a RetriBERT model."""

    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
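
# Quick illustrative check (not part of the original file): defaults are
# accessible like any other transformers config object.
#
#     config = RetriBertConfig()
#     assert config.hidden_size == 768 and config.projection_dim == 128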
| 586 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
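
# Illustrative downstream usage of what this lazy module exposes (checkpoint
# name taken from the public M2M-100 model card, shown here only as an example):
#
#     from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
#     tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
#     model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")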
| 586 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}


class SpeechT5Tokenizer(PreTrainedTokenizer):
    """SpeechT5 tokenizer backed by a character-level SentencePiece model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        # The SentencePiece processor is not picklable; it is rebuilt in __setstate__.
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
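
# Illustrative round-trip with one of the checkpoints listed above (not part
# of the original file):
#
#     tok = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
#     ids = tok("hello world").input_ids
#     text = tok.decode(ids, skip_special_tokens=True)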
| 565 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
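
# Design note (illustrative): the _LazyModule indirection keeps
# `import transformers.models.opt` cheap; the heavy torch/tf/flax modules are
# only imported the first time one of the names in _import_structure is
# actually accessed, e.g.:
#
#     from transformers import OPTForCausalLM  # triggers the lazy import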
| 565 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_rust_tokenizer()
UpperCamelCase = self.get_image_processor()
UpperCamelCase = OwlViTProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
UpperCamelCase = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase__ )
UpperCamelCase = OwlViTProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
UpperCamelCase = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase__ )
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase__ )
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase__ )
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
UpperCamelCase = self.get_image_processor(do_normalize=UpperCamelCase__ )
UpperCamelCase = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCamelCase__ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = OwlViTProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = image_processor(UpperCamelCase__ , return_tensors='np' )
UpperCamelCase = processor(images=UpperCamelCase__ , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = OwlViTProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
UpperCamelCase = 'lower newer'
UpperCamelCase = processor(text=UpperCamelCase__ , return_tensors='np' )
UpperCamelCase = tokenizer(UpperCamelCase__ , return_tensors='np' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = OwlViTProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
UpperCamelCase = 'lower newer'
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = 'google/owlvit-base-patch32'
UpperCamelCase = OwlViTProcessor.from_pretrained(UpperCamelCase__ )
UpperCamelCase = ['cat', 'nasa badge']
UpperCamelCase = processor(text=UpperCamelCase__ )
UpperCamelCase = 1_6
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = 'google/owlvit-base-patch32'
UpperCamelCase = OwlViTProcessor.from_pretrained(UpperCamelCase__ )
UpperCamelCase = [['cat', 'nasa badge'], ['person']]
UpperCamelCase = processor(text=UpperCamelCase__ )
UpperCamelCase = 1_6
UpperCamelCase = len(UpperCamelCase__ )
UpperCamelCase = max([len(UpperCamelCase__ ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def A ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = 'google/owlvit-base-patch32'
UpperCamelCase = OwlViTProcessor.from_pretrained(UpperCamelCase__ )
UpperCamelCase = ['cat', 'nasa badge']
UpperCamelCase = processor(text=UpperCamelCase__ )
UpperCamelCase = 1_6
UpperCamelCase = inputs['input_ids']
UpperCamelCase = [
[4_9_4_0_6, 2_3_6_8, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9_4_0_6, 6_8_4_1, 1_1_3_0_1, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = OwlViTProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = processor(images=UpperCamelCase__ , query_images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['query_pixel_values', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = OwlViTProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase = processor.batch_decode(UpperCamelCase__ )
UpperCamelCase = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
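
# Illustrative end-to-end use of the processor under test (not part of the
# test module; checkpoint name as used in the tests above):
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(text=[["cat", "nasa badge"]], images=image, return_tensors="pt")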
| 718 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = StableDiffusionPanoramaPipeline(**UpperCamelCase__ )
UpperCamelCase = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
UpperCamelCase = self.get_dummy_inputs(UpperCamelCase__ )
UpperCamelCase = sd_pipe(**UpperCamelCase__ ).images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = np.array([0.6_1_8_6, 0.5_3_7_4, 0.4_9_1_5, 0.4_1_3_5, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_7, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A ( self : Optional[Any] ):
"""simple docstring"""
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def A ( self : Any ):
"""simple docstring"""
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5E-3 )
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = StableDiffusionPanoramaPipeline(**UpperCamelCase__ )
UpperCamelCase = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
UpperCamelCase = self.get_dummy_inputs(UpperCamelCase__ )
UpperCamelCase = 'french fries'
UpperCamelCase = sd_pipe(**UpperCamelCase__ , negative_prompt=UpperCamelCase__ )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = StableDiffusionPanoramaPipeline(**UpperCamelCase__ )
UpperCamelCase = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
UpperCamelCase = self.get_dummy_inputs(UpperCamelCase__ )
UpperCamelCase = sd_pipe(**UpperCamelCase__ , view_batch_size=2 )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' )
UpperCamelCase = StableDiffusionPanoramaPipeline(**UpperCamelCase__ )
UpperCamelCase = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
UpperCamelCase = self.get_dummy_inputs(UpperCamelCase__ )
UpperCamelCase = sd_pipe(**UpperCamelCase__ ).images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = np.array([0.4_0_2_4, 0.6_5_1_0, 0.4_9_0_1, 0.5_3_7_8, 0.5_8_1_3, 0.5_6_2_2, 0.4_7_9_5, 0.4_4_6_7, 0.4_9_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = PNDMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , skip_prk_steps=UpperCamelCase__ )
UpperCamelCase = StableDiffusionPanoramaPipeline(**UpperCamelCase__ )
UpperCamelCase = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
UpperCamelCase = self.get_dummy_inputs(UpperCamelCase__ )
UpperCamelCase = sd_pipe(**UpperCamelCase__ ).images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = np.array([0.6_3_9_1, 0.6_2_9_1, 0.4_8_6_1, 0.5_1_3_4, 0.5_5_5_2, 0.4_5_7_8, 0.5_0_3_2, 0.5_0_2_3, 0.4_5_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaPipelineSlowTests(unittest.TestCase):
def A ( self : Optional[int] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : List[str] , UpperCamelCase__ : Optional[int]=0 ):
"""simple docstring"""
UpperCamelCase = torch.manual_seed(UpperCamelCase__ )
UpperCamelCase = {
'prompt': 'a photo of the dolomites',
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = 'stabilityai/stable-diffusion-2-base'
UpperCamelCase = DDIMScheduler.from_pretrained(UpperCamelCase__ , subfolder='scheduler' )
UpperCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(UpperCamelCase__ , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
UpperCamelCase = self.get_inputs()
UpperCamelCase = pipe(**UpperCamelCase__ ).images
UpperCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 2_0_4_8, 3)
UpperCamelCase = np.array(
[
0.3_6_9_6_8_3_9_2,
0.2_7_0_2_5_3_7_2,
0.3_2_4_4_6_7_6_6,
0.2_8_3_7_9_3_8_7,
0.3_6_3_6_3_2_7_4,
0.3_0_7_3_3_3_4_7,
0.2_7_1_0_0_0_2_7,
0.2_7_0_5_4_1_2_5,
0.2_5_5_3_6_0_9_6,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-base' , safety_checker=UpperCamelCase__ )
UpperCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
UpperCamelCase = self.get_inputs()
UpperCamelCase = pipe(**UpperCamelCase__ ).images
UpperCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 2_0_4_8, 3)
UpperCamelCase = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = 0
def callback_fn(UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : torch.FloatTensor ) -> None:
UpperCamelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
UpperCamelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 2_5_6)
UpperCamelCase = latents[0, -3:, -3:, -1]
UpperCamelCase = np.array(
[
0.1_8_6_8_1_8_6_9,
0.3_3_9_0_7_8_1_6,
0.5_3_6_1_2_7_6,
0.1_4_4_3_2_8_6_5,
-0.0_2_8_5_6_6_1_1,
-0.7_3_9_4_1_1_2_3,
0.2_3_3_9_7_9_8_7,
0.4_7_3_2_2_6_8_2,
-0.3_7_8_2_3_1_6_4,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
UpperCamelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 2_5_6)
UpperCamelCase = latents[0, -3:, -3:, -1]
UpperCamelCase = np.array(
[
0.1_8_5_3_9_6_4_5,
0.3_3_9_8_7_2_4_8,
0.5_3_7_8_5_5_9,
0.1_4_4_3_7_1_4_2,
-0.0_2_4_5_5_2_6_1,
-0.7_3_3_8_3_1_7,
0.2_3_9_9_0_7_5_5,
0.4_7_3_5_6_2_7_2,
-0.3_7_8_6_5_0_5,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
UpperCamelCase = False
UpperCamelCase = 'stabilityai/stable-diffusion-2-base'
UpperCamelCase = DDIMScheduler.from_pretrained(UpperCamelCase__ , subfolder='scheduler' )
UpperCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(UpperCamelCase__ , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ )
UpperCamelCase = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
UpperCamelCase = self.get_inputs()
pipe(**UpperCamelCase__ , callback=UpperCamelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def A ( self : Optional[int] ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase = 'stabilityai/stable-diffusion-2-base'
UpperCamelCase = DDIMScheduler.from_pretrained(UpperCamelCase__ , subfolder='scheduler' )
UpperCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(UpperCamelCase__ , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ )
UpperCamelCase = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCamelCase = self.get_inputs()
UpperCamelCase = pipe(**UpperCamelCase__ )
UpperCamelCase = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 1_0**9
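
# Illustrative use of the panorama pipeline exercised above (not part of the
# test module; model id and scheduler mirror the slow tests):
#
#     scheduler = DDIMScheduler.from_pretrained("stabilityai/stable-diffusion-2-base", subfolder="scheduler")
#     pipe = StableDiffusionPanoramaPipeline.from_pretrained("stabilityai/stable-diffusion-2-base", scheduler=scheduler)
#     image = pipe(prompt="a photo of the dolomites").images[0]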
| 324 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
def a ( self : Union[str, Any] ):
__UpperCAmelCase = self.tokenizer_class(self.vocab_file )
__UpperCAmelCase = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_UpperCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [9, 6, 7, 12, 10, 11] )
def a ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = self.get_rust_tokenizer()
__UpperCAmelCase = 'UNwant\u00E9d,running'
__UpperCAmelCase = tokenizer.tokenize(_UpperCAmelCase )
__UpperCAmelCase = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__UpperCAmelCase = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
__UpperCAmelCase = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__UpperCAmelCase = self.get_rust_tokenizer()
__UpperCAmelCase = tokenizer.encode(_UpperCAmelCase )
__UpperCAmelCase = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# With lower casing
__UpperCAmelCase = self.get_tokenizer(do_lower_case=_UpperCAmelCase )
__UpperCAmelCase = self.get_rust_tokenizer(do_lower_case=_UpperCAmelCase )
__UpperCAmelCase = 'UNwant\u00E9d,running'
__UpperCAmelCase = tokenizer.tokenize(_UpperCAmelCase )
__UpperCAmelCase = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__UpperCAmelCase = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
__UpperCAmelCase = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__UpperCAmelCase = self.get_rust_tokenizer()
__UpperCAmelCase = tokenizer.encode(_UpperCAmelCase )
__UpperCAmelCase = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def a ( self : List[Any] ):
__UpperCAmelCase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def a ( self : List[Any] ):
__UpperCAmelCase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def a ( self : str ):
__UpperCAmelCase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def a ( self : List[str] ):
__UpperCAmelCase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def a ( self : str ):
__UpperCAmelCase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def a ( self : Union[str, Any] ):
__UpperCAmelCase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def a ( self : Any ):
__UpperCAmelCase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def a ( self : List[Any] ):
__UpperCAmelCase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def a ( self : List[Any] ):
__UpperCAmelCase = BasicTokenizer(do_lower_case=_UpperCAmelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def a ( self : Optional[int] ):
__UpperCAmelCase = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__UpperCAmelCase = {}
for i, token in enumerate(_UpperCAmelCase ):
__UpperCAmelCase = i
__UpperCAmelCase = WordpieceTokenizer(vocab=_UpperCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def a ( self : Optional[Any] ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def a ( self : Optional[int] ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def a ( self : Optional[Any] ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def a ( self : List[str] ):
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_UpperCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(_UpperCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def a ( self : List[str] ):
__UpperCAmelCase = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
__UpperCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=_UpperCAmelCase )
__UpperCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_UpperCAmelCase )
__UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
__UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
assert encoded_sentence == [1_01] + text + [1_02]
assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]
def a ( self : Dict ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
__UpperCAmelCase = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
__UpperCAmelCase = tokenizer_r.encode_plus(
_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , )
__UpperCAmelCase = tokenizer_r.do_lower_case if hasattr(_UpperCAmelCase , '''do_lower_case''' ) else False
__UpperCAmelCase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def a ( self : int ):
__UpperCAmelCase = ['的', '人', '有']
__UpperCAmelCase = ''.join(_UpperCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase = True
__UpperCAmelCase = self.tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
__UpperCAmelCase = tokenizer_p.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
__UpperCAmelCase = tokenizer_r.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
__UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(_UpperCAmelCase )
__UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(_UpperCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__UpperCAmelCase = False
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
__UpperCAmelCase = self.tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
__UpperCAmelCase = tokenizer_r.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
__UpperCAmelCase = tokenizer_p.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
__UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(_UpperCAmelCase )
__UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(_UpperCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
__UpperCAmelCase = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(_UpperCAmelCase )
]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
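
# Illustrative direct use of the tokenizer under test (not part of the test
# module; checkpoint name as referenced above):
#
#     tok = MobileBertTokenizer.from_pretrained("google/mobilebert-uncased")
#     print(tok.tokenize("UNwant\u00E9d,running"))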
| 49 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
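
# Illustrative concrete command (an assumption, not part of this module). The
# `parser` passed to register_subcommand is the sub-parsers action of the CLI's
# top-level ArgumentParser, so add_parser() is available on it.
class HelloCommandSketch(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        hello_parser = parser.add_parser("hello")
        hello_parser.set_defaults(func=lambda args: HelloCommandSketch())

    def run(self):
        print("hello from a CLI command sketch")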
| 358 | 0 |
'''simple docstring'''
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    # Convert angles from degrees to radians.
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular (phasor) form.
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Calculate apparent power.
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
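
# Worked example (illustrative): a 100 V source at 0 degrees driving 5 A at
# 0 degrees gives a purely real apparent power of 500 VA:
#
#     >>> apparent_power(100, 5, 0, 0)
#     (500+0j)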
| 398 |
'''simple docstring'''
def sylvester(number: int) -> int:
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
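
# For reference (illustrative): Sylvester's sequence starts
# 2, 3, 7, 43, 1807, 3263443, ... with each term a(n) = a(n-1)^2 - a(n-1) + 1.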
| 398 | 1 |
def solution() -> int:
    """Project Euler 9: product of the Pythagorean triplet with a + b + c = 1000."""
    return [
        a * b * (1_000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1_000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(F'{solution() = }')
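
# Sanity check (illustrative): the unique triplet is (200, 375, 425), since
# 200 + 375 + 425 = 1000 and 200^2 + 375^2 = 425^2, so solution() returns
# 200 * 375 * 425 = 31875000.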
| 509 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def snake_case_ ( self : List[Any] ):
__lowercase : int = '''<pad>'''
__lowercase : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) , _snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) , _snake_case )
def snake_case_ ( self : Dict ):
__lowercase : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(len(_snake_case ) , 1008 )
def snake_case_ ( self : List[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def snake_case_ ( self : Dict ):
__lowercase : List[str] = XGLMTokenizer(_snake_case , keep_accents=_snake_case )
__lowercase : Union[str, Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_snake_case ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__lowercase : Optional[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_snake_case , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__lowercase : Tuple = tokenizer.convert_tokens_to_ids(_snake_case )
self.assertListEqual(
_snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__lowercase : Union[str, Any] = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(
_snake_case , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def snake_case_ ( self : List[str] ):
return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
def snake_case_ ( self : Any ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(_snake_case , f.name )
__lowercase : Union[str, Any] = XGLMTokenizer(f.name , keep_accents=_snake_case )
__lowercase : List[str] = pickle.dumps(_snake_case )
pickle.loads(_snake_case )
def snake_case_ ( self : str ):
if not self.test_rust_tokenizer:
return
__lowercase : Tuple = self.get_tokenizer()
__lowercase : Optional[int] = self.get_rust_tokenizer()
__lowercase : Dict = '''I was born in 92000, and this is falsé.'''
__lowercase : int = tokenizer.tokenize(_snake_case )
__lowercase : int = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
__lowercase : Dict = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
__lowercase : Tuple = rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
self.assertListEqual(_snake_case , _snake_case )
__lowercase : Any = self.get_rust_tokenizer()
__lowercase : List[str] = tokenizer.encode(_snake_case )
__lowercase : Union[str, Any] = rust_tokenizer.encode(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
@slow
def snake_case_ ( self : Union[str, Any] ):
__lowercase : Optional[Any] = '''Hello World!'''
__lowercase : int = [2, 3_1227, 4447, 35]
self.assertListEqual(_snake_case , self.big_tokenizer.encode(_snake_case ) )
@slow
def snake_case_ ( self : Union[str, Any] ):
__lowercase : Optional[int] = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
)
# fmt: off
__lowercase : Any = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(_snake_case , self.big_tokenizer.encode(_snake_case ) )
@slow
def snake_case_ ( self : Union[str, Any] ):
# fmt: off
__lowercase : Optional[Any] = {
'''input_ids''': [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name='''facebook/xglm-564M''' , padding=_snake_case , )
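
# Illustrative direct use of the tokenizer under test (not part of the test
# module; checkpoint name as referenced above):
#
#     tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#     ids = tok("Hello World!").input_ids  # expected above to start with the <s> id 2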
| 509 | 1 |
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Chudnovsky algorithm: each series term adds roughly 14 digits of pi."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
| 22 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
A = ort.SessionOptions()
A = False
return options
def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((768, 512) )
# using the PNDM scheduler by default
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=A_ )
A = 'A fantasy landscape, trending on artstation'
A = np.random.RandomState(0 )
A = pipe(
prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=A_ ,output_type='np' ,)
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
A = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((768, 512) )
A = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' )
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=A_ ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=A_ )
A = 'A fantasy landscape, trending on artstation'
A = np.random.RandomState(0 )
A = pipe(
prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=A_ ,output_type='np' ,)
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
A = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 | 22 | 1 |
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave the characters of two strings; leftover tail characters are appended."""
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
| 80 |
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm: repeatedly replace (a, b) with (b, a % b)."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive variant: gcd(a, b) = gcd(b, a % b)."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
def main() -> None:
print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
| 600 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))
    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))
    def test_get_activation(self):
get_activation("""gelu""" )
get_activation("""gelu_10""" )
get_activation("""gelu_fast""" )
get_activation("""gelu_new""" )
get_activation("""gelu_python""" )
get_activation("""gelu_pytorch_tanh""" )
get_activation("""linear""" )
get_activation("""mish""" )
get_activation("""quick_gelu""" )
get_activation("""relu""" )
get_activation("""sigmoid""" )
get_activation("""silu""" )
get_activation("""swish""" )
get_activation("""tanh""" )
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)
    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        # The returned activation must not be shared global state: an attribute set
        # on one instance must not appear on another instance.
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
| 719 |
def perfect_cube(n: int) -> bool:
    """Return True if `n` is the cube of an integer."""
    # round() guards against floating-point error, e.g. 27 ** (1 / 3) != 3.0 exactly
    val = round(n ** (1 / 3))
    return val * val * val == n
if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
| 235 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 387 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
"""BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BloomForCausalLM""",
"""BloomModel""",
"""BloomPreTrainedModel""",
"""BloomForSequenceClassification""",
"""BloomForTokenClassification""",
"""BloomForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 387 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
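# Illustrative usage (added sketch, not part of the original module; assumes `transformers`
# is installed and that BertConfig/GPT2Config are importable from it):
#
#     from transformers import BertConfig, GPT2Config, EncoderDecoderConfig
#
#     config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), GPT2Config())
#     assert config.decoder.is_decoder and config.decoder.add_cross_attention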
| 720 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
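# Illustrative usage (added sketch; assumes the transformers agents/tools runtime is available
# and that the default checkpoint can be downloaded):
#
#     classifier = TextClassificationTool()
#     label = classifier("This is a super nice API!", labels=["positive", "negative"])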
| 214 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
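# Illustrative usage (added sketch, not from the original file):
#
#     config = XLMRobertaXLConfig()  # xlm-roberta-xl defaults: hidden_size=2560, 36 layers
#     small = XLMRobertaXLConfig(hidden_size=1024, num_hidden_layers=4)  # hypothetical smaller variant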
| 14 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
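# Illustrative usage (added sketch, not part of the original module):
#
#     config = UniSpeechSatConfig()
#     # With the default conv strides (5, 2, 2, 2, 2, 2, 2) the feature encoder
#     # downsamples raw audio by 5 * 2**6 = 320 samples per output frame:
#     assert config.inputs_to_logits_ratio == 320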
| 160 | 0 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}

        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
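# Illustrative round-trip (added sketch, not from the original file):
#
#     meta = DatasetMetadata.from_yaml_string("language:\n- en\ntrain-eval-index: []\n")
#     assert "train_eval_index" in meta          # dashed YAML key mapped to the field name
#     print(meta.to_yaml_string())               # serializes back with the dash restored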
known_task_ids = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 484 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_karras_ve_pipeline(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 484 | 1 |
"""simple docstring"""
import math
class SelfOrganizingMap:
    def get_winner(self, weights, sample):
        """Compute the winning (closest of the two) cluster by squared Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(self, weights, sample, j, alpha):
        """Pull the winning vector `j` toward the sample with learning rate `alpha`."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
| 52 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__lowercase = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
}
}
__lowercase = {
'''camembert-base''': 512,
}
__lowercase = '''▁'''
class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}

        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
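# Illustrative usage (added sketch; "sentencepiece.bpe.model" below is a hypothetical local
# path to a trained SentencePiece model, not a file shipped with this module):
#
#     tokenizer = CamembertTokenizer("sentencepiece.bpe.model")
#     ids = tokenizer("J'aime le camembert !")["input_ids"]
#     print(tokenizer.convert_ids_to_tokens(ids))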
| 167 | 0 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of `number`, rounded to `digit_amount` digits when positive."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 700 |
def cramers_rule_2x2(equation1: list, equation2: list) -> tuple:
    """Solve a 2x2 linear system; each equation is given as [a, b, c] for ax + by = c."""
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
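
# Illustrative usage (added; the example system below was chosen for demonstration):
if __name__ == "__main__":
    # x + 2y = 3 and 2x + y = 3 intersect at (1.0, 1.0)
    print(cramers_rule_2x2([1, 2, 3], [2, 1, 3]))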
| 301 | 0 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    segmentation_map = Image.open(dataset[1]["file"])

    return image, segmentation_map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        # With reduce_labels, background (0) becomes 255 and other labels shift down by one
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
| 116 |
values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}


def decimal_to_hexadecimal(decimal):
    """Take an integer decimal value and return its hexadecimal representation as a str beginning with 0x."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
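
# Illustrative examples (added; not from the original file):
#   decimal_to_hexadecimal(5)    -> "0x5"
#   decimal_to_hexadecimal(255)  -> "0xff"
#   decimal_to_hexadecimal(-256) -> "-0x100"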
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 0 |
from math import pow, sqrt


def validate(*values: float) -> bool:
    """All inputs must be positive for Graham's law to apply."""
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float):
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
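
# Illustrative check (added; the gases below were chosen for demonstration):
if __name__ == "__main__":
    # Hydrogen (2.016 g/mol) vs. oxygen (31.998 g/mol): H2 effuses ~3.98x faster.
    print(effusion_ratio(2.016, 31.998))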
| 706 |
import torch

from diffusers import StableDiffusionPipeline

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 490 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
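# Illustrative usage (added sketch, not from the original file):
#
#     config = DonutSwinConfig()
#     # embed_dim doubles at each of the four stages: 96 * 2**3 = 768
#     assert config.hidden_size == 768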
| 628 |
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient function over 2..limit using a sieve over primes."""
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so correct phi for all of its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
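
# Illustrative check on a small bound (added; not from the original file):
#   solution(10) == 31, since phi(2..10) = 1 + 2 + 2 + 4 + 2 + 6 + 4 + 6 + 4 = 31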
if __name__ == "__main__":
print(solution())
| 628 | 1 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
lowercase_ = logging.get_logger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE :
_UpperCamelCase : str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
_UpperCamelCase : str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
_UpperCamelCase : int = field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
_UpperCamelCase : bool = field(
default=UpperCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def SCREAMING_SNAKE_CASE_ ( self : int )-> Optional[int]:
"""simple docstring"""
lowercase__ = self.task_name.lower()
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Tuple = 'train'
_UpperCamelCase : Any = 'dev'
_UpperCamelCase : List[str] = 'test'
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : GlueDataTrainingArguments
_UpperCamelCase : str
_UpperCamelCase : List[InputFeatures]
    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
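# Hypothetical usage sketch (not part of the original file; task name and data
# directory are examples only):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#   data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
#   train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=Split.train)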
| 45 |
from __future__ import annotations

import math
from collections.abc import Callable


def line_length(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
| 45 | 1 |
'''simple docstring'''
import os
import zipfile

import pytest

from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , )
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
| 75 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 360 | 0 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"]
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
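# Hypothetical usage sketch (not part of the original file): these helpers
# stand in for real data pipelines inside Accelerate's example tests, e.g.
#
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   train_dl, eval_dl = mocked_dataloaders(accelerator, batch_size=16)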
| 709 | import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
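# Hypothetical usage sketch (not part of the original file): composing a full
# InstructBLIP config from its three sub-configs.
#
#   vision_config = InstructBlipVisionConfig()
#   qformer_config = InstructBlipQFormerConfig()
#   text_config = CONFIG_MAPPING["opt"]()
#   config = InstructBlipConfig.from_vision_qformer_text_configs(
#       vision_config, qformer_config, text_config
#   )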
| 679 | 0 |
from __future__ import annotations


def ceil_index(v, l, r, key):  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            tail[length] = v[i]
            length += 1
        else:
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
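    # Hypothetical quick demo (not part of the original file): one longest
    # increasing subsequence of the list below is [2, 3, 7, 8, 10, 13].
    print(longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]))  # -> 6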
| 74 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 451 | 0 |
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Find the maximum non-adjacent sum of the integers in the nums input list."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
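    # Hypothetical quick demo (not part of the original file): picking the
    # non-adjacent elements 2 and 5 maximizes the sum.
    print(maximum_non_adjacent_sum([1, 2, 3, 5]))  # -> 7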
| 205 |
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """
    Creates a state space tree to iterate through each branch using DFS. Each
    state has exactly two children; the recursion terminates at the end of the
    given sequence.
    """
    if index == len(sequence):
        print(current_subsequence)
        return
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)
    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
| 205 | 1 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: list = field(default_factory=list)
    dest_skip: list = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    """
    Fake wrapper for the vissl trunk so we can run a forward pass and grab the features.
    """

    def __init__(self, model: nn.Module):
        super().__init__()
        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))
        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """
    A dictionary with some additional logic to return a function that creates the correct original model.
    """

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val
class NameToOurModelFuncMap(dict):
    """
    A dictionary with some additional logic to return the correct Hugging Face RegNet class reference.
    """

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        # NOTE: the mismatch flag below is an assumption; the original obfuscated
        # source only shows that a third argument was forwarded here.
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)
    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)
    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )
    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output
    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]
    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )
        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
'''regnet-x-002''': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='''x''' ),
'''regnet-x-004''': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-016''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-032''': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type='''x''' ),
'''regnet-x-040''': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type='''x''' ),
'''regnet-x-064''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type='''x''' ),
'''regnet-x-080''': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type='''x''' ),
'''regnet-x-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type='''x''' ),
'''regnet-x-160''': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type='''x''' ),
'''regnet-x-320''': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type='''x''' ),
# y variant
'''regnet-y-002''': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
'''regnet-y-004''': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
'''regnet-y-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
'''regnet-y-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
'''regnet-y-016''': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
'''regnet-y-032''': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
'''regnet-y-040''': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
'''regnet-y-064''': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
'''regnet-y-080''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
'''regnet-y-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
'''regnet-y-160''': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
'''regnet-y-320''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'''regnet-y-320-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
'''regnet-y-640-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
'''regnet-y-1280-seer''': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
'''regnet-y-2560-seer''': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
'''regnet-y-10b-seer''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
# finetuned on imagenet
'''regnet-y-320-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
'''regnet-y-640-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
'''regnet-y-1280-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
'''regnet-y-2560-seer-in1k''': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
'''regnet-y-10b-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()

    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained (the map keys below are inferred from the checkpoint URLs and
    # the config names listed above)
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported regnet* architecture,'''
''' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
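    # Example invocation (hypothetical script name and paths):
    #   python convert_regnet_to_pytorch.py --model_name regnet-y-040 --pytorch_dump_folder_path /tmp/regnet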
| 431 |
def get_set_bits_count(number: int) -> int:
    """Count the number of set bits in a non-negative integer using Brian Kernighan's trick."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
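    # Hypothetical quick demo (not part of the original file): 0b1011 has
    # three set bits, so the loop above runs exactly three times.
    print(get_set_bits_count(0b1011))  # -> 3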
| 431 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 713 |
import fire

from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
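    # Hypothetical CLI invocation via python-fire (model name and path are
    # examples only):
    #   python save_randomly_initialized_version.py t5-small /tmp/t5-small-random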
| 486 | 0 |
'''simple docstring'''
import requests

APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "onecall", params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input("Enter a location:").strip()
        if location:
            pprint(current_weather(location))
        else:
            break | 125 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 580 | 0 |
from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    """Return number + 2 if number and number + 2 are twin primes, else -1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
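    # Hypothetical quick demo (not part of the original file): 5 and 7 are
    # twin primes, while 8 is not prime at all.
    print(twin_prime(5))  # -> 7
    print(twin_prime(8))  # -> -1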
| 196 |
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualizing the Polymonial Regression results
def viz_polymonial():
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polymonial()

    # Predicting a new result with Polymonial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
| 196 | 1 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
_CITATION = "\\n@inproceedings{banarjee2005,\n  title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n  author = {Banerjee, Satanjeev  and Lavie, Alon},\n  booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n  month = jun,\n  year = {2005},\n  address = {Ann Arbor, Michigan},\n  publisher = {Association for Computational Linguistics},\n  url = {https://www.aclweb.org/anthology/W05-0909},\n  pages = {65--72},\n}\n"
_DESCRIPTION = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
_KWARGS_DESCRIPTION = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    'meteor': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric('meteor')\n    >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n    >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results[\"meteor\"], 4))\n    0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(reference), word_tokenize(prediction), alpha=alpha, beta=beta, gamma=gamma
                )
                for reference, prediction in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(reference, prediction, alpha=alpha, beta=beta, gamma=gamma)
                for reference, prediction in zip(references, predictions)
            ]
        return {"meteor": np.mean(scores)}
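# Usage sketch mirroring the example already given in _KWARGS_DESCRIPTION above:
#
#   import datasets
#   meteor = datasets.load_metric("meteor")
#   predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
#   references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
#   print(round(meteor.compute(predictions=predictions, references=references)["meteor"], 4))  # -> 0.6944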
| 326 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    dataset_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(dataset_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_dedup, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_dedup), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 326 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa


if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))})"
                f" are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
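# Hypothetical usage sketch (not part of the original file): encoding one
# example with a variable number of translations per language.
#
#   feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
#   feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"})
#   # -> {"language": ("de", "en", "fr", "fr"),
#   #     "translation": ("die katze", "the cat", "la chatte", "le chat")}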
| 713 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class _a ( unittest.TestCase ):
'''simple docstring'''
def __lowercase ( self) -> str:
'''simple docstring'''
lowercase__: Any = {
"task_specific_params": {
"summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
"summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
"summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
}
}
lowercase__: Optional[int] = {
"task_specific_params.summarization.length_penalty": 1.0,
"task_specific_params.summarization.max_length": 128,
"task_specific_params.summarization.min_length": 12,
"task_specific_params.summarization.num_beams": 4,
"task_specific_params.summarization_cnn.length_penalty": 2.0,
"task_specific_params.summarization_cnn.max_length": 142,
"task_specific_params.summarization_cnn.min_length": 56,
"task_specific_params.summarization_cnn.num_beams": 4,
"task_specific_params.summarization_xsum.length_penalty": 1.0,
"task_specific_params.summarization_xsum.max_length": 62,
"task_specific_params.summarization_xsum.min_length": 11,
"task_specific_params.summarization_xsum.num_beams": 6,
}
self.assertEqual(flatten_dict(UpperCAmelCase_) , UpperCAmelCase_)
    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
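# Illustrative usage of the helpers exercised above (not part of the test class). Each helper
# dispatches on the input type, so one call covers NumPy arrays and torch/tf/jax tensors alike.
# The import path is an assumption about where these utilities live in `transformers`:
#
#     from transformers.utils import expand_dims, squeeze, transpose
#
#     x = np.random.randn(1, 3, 4)
#     squeeze(x).shape              # (3, 4)
#     transpose(x).shape            # (4, 3, 1)
#     expand_dims(x, axis=1).shape  # (1, 1, 3, 4)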
| 120 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
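# Sketch of how a new subcommand would plug into the same registration pattern: a function
# receives the `subparsers` object, adds its own parser, and sets a `func` default that
# `main()` dispatches to. The names below are hypothetical and do not exist in `accelerate`.
#
# def hello_command_parser(subparsers=None):
#     parser = subparsers.add_parser("hello", help="Toy subcommand for illustration")
#     parser.add_argument("--name", default="world")
#     parser.set_defaults(func=lambda args: print(f"Hello, {args.name}!"))
#     return parser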
| 663 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """
    Copy/paste/tweak the fairseq RoBERTa weights into our BERT-style structure.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,  # PyTorch default used in fairseq
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer
    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
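# Example invocation (illustrative; the script filename and paths are placeholders):
#
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path /path/to/fairseq_checkpoint_dir \
#       --pytorch_dump_folder_path /path/to/output_dir
#
# Pass --classification_head when the fairseq checkpoint was fine-tuned with an MNLI head.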
| 663 | 1 |
def different_signs(num1: int, num2: int) -> bool:
    """
    Return True if the two integers have opposite signs, using the XOR sign-bit trick.

    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    """
    return num1 ^ num2 < 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
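# Why XOR works here: in two's complement the sign lives in the most significant bit, so
# `num1 ^ num2` has that bit set exactly when the signs differ, which makes the XOR result
# negative. Worked example (8-bit view for illustration):
#   1 = 0b00000001, -1 = 0b11111111  ->  1 ^ -1 = ...11111110 (negative)     -> True
#   1 = 0b00000001,  1 = 0b00000001  ->  1 ^  1 = 0           (non-negative) -> False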
| 347 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """
    Base class for the text encoder's outputs, with an extra projection of the hidden states.
    """

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
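# Usage sketch (illustrative; the checkpoint id below is a placeholder, not a real repo):
#
#     config = RobertaSeriesConfig.from_pretrained("org/alt-diffusion-text-encoder")  # placeholder id
#     model = RobertaSeriesModelWithTransformation(config)
#     out = model(input_ids=input_ids, attention_mask=attention_mask)
#     out.projection_state  # (batch, seq_len, config.project_dim), fed to a diffusion UNet's cross-attention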
| 347 | 1 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Deal with dynamic shapes in TensorFlow cleanly: static dims as ints, dynamic dims as tensors."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    # The tiny epsilon keeps the op numerically well-behaved (e.g. under XLA compilation)
    # without changing the result in any meaningful way.
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon,
    )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF

    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    """Turns a 0/1 padding mask into an additive mask with large negative values on masked positions."""
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask
def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
def save_attributes_to_hdf5_group(group, name, data):
    """Saves attributes (data) of the specified name into the HDF5 group, chunking if needed."""
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    """Loads attributes of the specified name from the HDF5 group, reassembling chunks if needed."""
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data
def expand_1d(data):
    """Expands 1-dimensional `Tensor`s into 2-dimensional `Tensor`s."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
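# Small self-check for the helpers above (illustrative; not part of the original library module).
if __name__ == "__main__":
    demo = tf.zeros((2, 3, 4))
    print(shape_list(demo))  # [2, 3, 4] -- static dims come back as Python ints
    print(flatten(demo, start_dim=1).shape)  # (2, 12), mirroring torch.flatten
    print(stable_softmax(tf.constant([[0.0, 0.0]])).numpy())  # [[0.5 0.5]]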
| 81 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 301 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])

            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class TFGPT2TokenizerTest(unittest.TestCase):
    # The TF tokenizer is meant to be used as a drop-in, in-graph replacement for the
    # pretrained Python tokenizer, so that is what we compare it against here.

    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
            loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertTrue(tf.reduce_all(out == loaded_output))
    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
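# The point of TFGPT2Tokenizer over the plain Python tokenizer is that it runs inside the
# TensorFlow graph, so tokenization can be exported together with the model (see
# test_saved_model above). Minimal sketch:
#
#     tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
#     ids = tf_tokenizer(tf.constant(["hello world"]))["input_ids"]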
| 704 |
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
@is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 601 | 0 |
import unittest

from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin


if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor


class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}
def get_html_strings():
    html_string_1 = '''<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>'''

    html_string_2 = '''\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n '''

    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
| 24 |
def excel_title_to_column(column_title: str) -> int:
    """
    Given the column title of an Excel sheet, return its corresponding column number.

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("B")
    2
    >>> excel_title_to_column("AB")
    28
    >>> excel_title_to_column("Z")
    26
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
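# The title <-> number mapping is bijective base-26 arithmetic with no zero digit. A hypothetical
# inverse (not part of the original module) makes the relationship easy to round-trip check:
def column_to_excel_title(column_number: int) -> str:
    """
    >>> column_to_excel_title(28)
    'AB'
    >>> column_to_excel_title(excel_title_to_column("ZZ")) == "ZZ"
    True
    """
    title = ""
    while column_number > 0:
        # Shift by one before dividing because 'A' maps to 1, not 0.
        column_number, remainder = divmod(column_number - 1, 26)
        title = chr(65 + remainder) + title
    return title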
if __name__ == "__main__":
from doctest import testmod
testmod()
| 371 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2_646, 10_269, 83, 99_942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 109 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_ddpm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 109 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_whisper_fast'] = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_whisper'] = [
        'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'WhisperForConditionalGeneration',
        'WhisperModel',
        'WhisperPreTrainedModel',
        'WhisperForAudioClassification',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_whisper'] = [
        'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFWhisperForConditionalGeneration',
        'TFWhisperModel',
        'TFWhisperPreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_whisper'] = [
        'FlaxWhisperForConditionalGeneration',
        'FlaxWhisperModel',
        'FlaxWhisperPreTrainedModel',
        'FlaxWhisperForAudioClassification',
    ]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
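# How the lazy module behaves in practice (illustrative): importing the package stays cheap, and
# each framework-specific submodule is only imported when one of its attributes is first touched.
#
#     from transformers.models import whisper
#     whisper.WhisperConfig   # resolves via `configuration_whisper` on first access
#     whisper.WhisperModel    # pulls in `modeling_whisper` (and therefore torch) on first access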
| 114 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    """
    Unconditional latent-diffusion pipeline: iteratively denoises latents with a UNet,
    then decodes them to images with a VQ-VAE.
    """

    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
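Assuming a compatible checkpoint, usage of the pipeline above would look roughly like this (`CompVis/ldm-celebahq-256` is a public checkpoint commonly paired with it):

import torch
from diffusers import LDMPipeline

pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
generator = torch.Generator().manual_seed(0)
image = pipe(batch_size=1, num_inference_steps=50, generator=generator).images[0]
image.save("ldm_sample.png")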
| 114 | 1 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
lowercase__ = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
lowercase__ = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
lowercase__ = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized=False,
        ignore_punct=False,
        support_zh_ja_chars=False,
        case_sensitive=False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 276 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
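A short usage sketch for the configuration above (builds a randomly initialized model from the default, conv-bert-base-like hyperparameters; no weights are downloaded):

from transformers import ConvBertConfig, ConvBertModel

config = ConvBertConfig(conv_kernel_size=9, head_ratio=2)
model = ConvBertModel(config)
print(config.hidden_size, config.num_hidden_layers)  # 768 12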
| 276 | 1 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = "\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)[\"depth\"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline(\"depth-estimation\")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to(\"cuda\")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to(\"cuda\")\n\n\n >>> img = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/cat.png\"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")\n\n >>> prompt = \"A robot, 4k photo\"\n >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"\n\n >>> generator = torch.Generator(device=\"cuda\").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save(\"robot_cat.png\")\n ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    """Round the requested output size up to the nearest size compatible with the latent scale factor."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
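Two worked examples for the helper above, assuming the default scale_factor=8:

# 768 // 64 = 12 with no remainder, so the latent side is 12 * 8 = 96.
assert downscale_height_and_width(768, 768, 8) == (96, 96)
# Non-divisible sizes round up: 770 // 64 = 12 remainder 2, so 13 * 8 = 104.
assert downscale_height_and_width(770, 770, 8) == (104, 104)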
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
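The classifier-free-guidance update in the loop above blends the unconditional and conditional noise estimates. As a standalone tensor sketch of that one step:

import torch

guidance_scale = 4.0
noise_pred_uncond = torch.zeros(1, 4, 96, 96)
noise_pred_text = torch.ones(1, 4, 96, 96)

# The guided prediction moves from the unconditional estimate toward (and,
# for guidance_scale > 1, past) the conditioned one.
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert torch.allclose(noise_pred, torch.full_like(noise_pred, 4.0))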
| 611 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
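With the lazy structure above, the public classes resolve identically through the top-level package or the subpackage. A small usage sketch (assuming transformers is installed with torch available):

from transformers import AltCLIPProcessor  # noqa: F401  (resolved lazily on first access)
from transformers.models.altclip import AltCLIPConfig

config = AltCLIPConfig()  # builds default text and vision sub-configs
print(type(config.text_config).__name__)    # AltCLIPTextConfig
print(type(config.vision_config).__name__)  # AltCLIPVisionConfig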
| 611 | 1 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'If set, {key} must be yes or no.')
    return _value


_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
_run_remote_tests = parse_flag_from_env('RUN_REMOTE', default=False)
_run_local_tests = parse_flag_from_env('RUN_LOCAL', default=True)
_run_packaged_tests = parse_flag_from_env('RUN_PACKAGED', default=True)
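For example, running the suite as `RUN_SLOW=yes pytest` flips `_run_slow_tests` on, which the `slow` decorator further down consults. A tiny illustration of the flag parsing:

import os

os.environ["RUN_SLOW"] = "yes"
# strtobool maps "yes"/"true"/"1" to 1 and "no"/"false"/"0" to 0.
assert parse_flag_from_env("RUN_SLOW", default=False) == 1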
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
    reason='test requires sndfile>=0.12.1: \'pip install "soundfile>=0.12.1"\'; ',
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
    reason='test requires apache-beam and a compatible dill version',
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse('0.3.2'),
    reason='test requires dill>0.3.2 for cloudpickle compatibility',
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == 'win32',
    reason='test should not be run on Windows',
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = 'https://10.255.255.1'
        if kwargs.get('timeout') is None:
            raise RequestWouldHangIndefinitelyError(
                f'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.')
        kwargs['timeout'] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace('10.255.255.1', f'OfflineMock[{url}]'),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError('Offline mode is enabled.', request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('requests.Session.send', raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('requests.Session.request', timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('datasets.config.HF_DATASETS_OFFLINE', True):
            yield
    else:
        raise ValueError('Please use a value from the OfflineSimulationMode enum.')
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith('500') or str(err).startswith('502'):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print('\nRunning: ', ' '.join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=''):
        line = line.decode('utf-8').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label='stdout:')),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label='stderr:')),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))

    cmd_str = ' '.join(cmd)
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f'The combined stderr from workers follows:\n{stderr}')

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    worker = os.environ.get('PYTEST_XDIST_WORKER', 'gw0')
    worker = re.sub(r'^gw', '', worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29_500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
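A brief usage sketch for the subprocess helper defined above, running a trivial Python command and inspecting the captured streams:

import sys

# Runs `python -c "print('hello')"` through the asyncio-based runner above,
# echoing output and enforcing the default 180 s timeout.
result = execute_subprocess_async([sys.executable, "-c", "print('hello')"])
assert result.returncode == 0
assert "hello" in "\n".join(result.stdout)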
| 718 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
| 558 | 0 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    """
    Wrapper over multiple `ControlNetModel`s whose residuals are summed before being fed to the UNet.
    """

    def __init__(self, controlnets):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample,
        timestep,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale,
        class_labels=None,
        timestep_cond=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        guess_mode=False,
        return_dict=True,
    ):
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory,
        is_main_process=True,
        save_function=None,
        safe_serialization=False,
        variant=None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path, **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}.")

        return cls(controlnets)
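A usage sketch for the container above, composing two ControlNets (the checkpoint names are illustrative; any two compatible ControlNets work):

from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

controlnet_canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
controlnet_depth = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth")

# Passing a list of ControlNets makes the pipeline wrap them in a
# MultiControlNetModel; their residuals are summed as in `forward` above.
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=[controlnet_canny, controlnet_depth]
)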
| 28 |
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 616 | 0 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = 'ResNetConfig'

# Base docstring
_CHECKPOINT_FOR_DOC = 'microsoft/resnet-50'
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'microsoft/resnet-50'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tiger cat'

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'microsoft/resnet-50',
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class __UpperCAmelCase( nn.Module ):
"""simple docstring"""
def __init__( self , __magic_name__ , __magic_name__ , __magic_name__ = 3 , __magic_name__ = 1 , __magic_name__ = "relu" ):
"""simple docstring"""
super().__init__()
A_ : Union[str, Any] = nn.Convad(
__magic_name__ , __magic_name__ , kernel_size=__magic_name__ , stride=__magic_name__ , padding=kernel_size // 2 , bias=__magic_name__ )
A_ : Union[str, Any] = nn.BatchNormad(__magic_name__ )
A_ : List[Any] = ACTaFN[activation] if activation is not None else nn.Identity()
def UpperCAmelCase ( self , __magic_name__ ):
"""simple docstring"""
A_ : Union[str, Any] = self.convolution(__magic_name__ )
A_ : str = self.normalization(__magic_name__ )
A_ : Union[str, Any] = self.activation(__magic_name__ )
return hidden_state
class __UpperCAmelCase( nn.Module ):
"""simple docstring"""
def __init__( self , __magic_name__ ):
"""simple docstring"""
super().__init__()
A_ : Tuple = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
A_ : str = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
A_ : str = config.num_channels
def UpperCAmelCase ( self , __magic_name__ ):
"""simple docstring"""
A_ : Optional[Any] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
A_ : Optional[int] = self.embedder(__magic_name__ )
A_ : Optional[Any] = self.pooler(__magic_name__ )
return embedding
class __UpperCAmelCase( nn.Module ):
"""simple docstring"""
def __init__( self , __magic_name__ , __magic_name__ , __magic_name__ = 2 ):
"""simple docstring"""
super().__init__()
A_ : Dict = nn.Convad(__magic_name__ , __magic_name__ , kernel_size=1 , stride=__magic_name__ , bias=__magic_name__ )
A_ : Optional[Any] = nn.BatchNormad(__magic_name__ )
def UpperCAmelCase ( self , __magic_name__ ):
"""simple docstring"""
A_ : Any = self.convolution(__magic_name__ )
A_ : List[str] = self.normalization(__magic_name__ )
return hidden_state
class __UpperCAmelCase( nn.Module ):
"""simple docstring"""
def __init__( self , __magic_name__ , __magic_name__ , __magic_name__ = 1 , __magic_name__ = "relu" ):
"""simple docstring"""
super().__init__()
A_ : Union[str, Any] = in_channels != out_channels or stride != 1
A_ : str = (
ResNetShortCut(__magic_name__ , __magic_name__ , stride=__magic_name__ ) if should_apply_shortcut else nn.Identity()
)
A_ : Union[str, Any] = nn.Sequential(
ResNetConvLayer(__magic_name__ , __magic_name__ , stride=__magic_name__ ) , ResNetConvLayer(__magic_name__ , __magic_name__ , activation=__magic_name__ ) , )
A_ : Optional[Any] = ACTaFN[activation]
def UpperCAmelCase ( self , __magic_name__ ):
"""simple docstring"""
A_ : List[str] = hidden_state
A_ : Any = self.layer(__magic_name__ )
A_ : Dict = self.shortcut(__magic_name__ )
hidden_state += residual
A_ : Any = self.activation(__magic_name__ )
return hidden_state
class __UpperCAmelCase( nn.Module ):
"""simple docstring"""
def __init__( self , __magic_name__ , __magic_name__ , __magic_name__ = 1 , __magic_name__ = "relu" , __magic_name__ = 4 ):
"""simple docstring"""
super().__init__()
A_ : Union[str, Any] = in_channels != out_channels or stride != 1
A_ : Optional[Any] = out_channels // reduction
A_ : Union[str, Any] = (
ResNetShortCut(__magic_name__ , __magic_name__ , stride=__magic_name__ ) if should_apply_shortcut else nn.Identity()
)
A_ : Optional[int] = nn.Sequential(
ResNetConvLayer(__magic_name__ , __magic_name__ , kernel_size=1 ) , ResNetConvLayer(__magic_name__ , __magic_name__ , stride=__magic_name__ ) , ResNetConvLayer(__magic_name__ , __magic_name__ , kernel_size=1 , activation=__magic_name__ ) , )
A_ : Optional[int] = ACTaFN[activation]
def UpperCAmelCase ( self , __magic_name__ ):
"""simple docstring"""
A_ : Dict = hidden_state
A_ : Optional[int] = self.layer(__magic_name__ )
A_ : List[Any] = self.shortcut(__magic_name__ )
hidden_state += residual
A_ : str = self.activation(__magic_name__ )
return hidden_state
class __UpperCAmelCase( nn.Module ):
"""simple docstring"""
def __init__( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = 2 , __magic_name__ = 2 , ):
"""simple docstring"""
super().__init__()
A_ : Dict = ResNetBottleNeckLayer if config.layer_type == '''bottleneck''' else ResNetBasicLayer
A_ : Optional[int] = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(__magic_name__ , __magic_name__ , stride=__magic_name__ , activation=config.hidden_act ) , *[layer(__magic_name__ , __magic_name__ , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def UpperCAmelCase ( self , __magic_name__ ):
"""simple docstring"""
A_ : Tuple = input
for layer in self.layers:
A_ : Optional[Any] = layer(__magic_name__ )
return hidden_state
class __UpperCAmelCase( nn.Module ):
"""simple docstring"""
def __init__( self , __magic_name__ ):
"""simple docstring"""
super().__init__()
A_ : Any = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
__magic_name__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
A_ : Optional[int] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(__magic_name__ , config.depths[1:] ):
self.stages.append(ResNetStage(__magic_name__ , __magic_name__ , __magic_name__ , depth=__magic_name__ ) )
def UpperCAmelCase ( self , __magic_name__ , __magic_name__ = False , __magic_name__ = True ):
"""simple docstring"""
A_ : Optional[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
A_ : Optional[Any] = hidden_states + (hidden_state,)
A_ : List[Any] = stage_module(__magic_name__ )
if output_hidden_states:
A_ : Union[str, Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=__magic_name__ , hidden_states=__magic_name__ , )
class __UpperCAmelCase( A__ ):
"""simple docstring"""
__magic_name__ = ResNetConfig
__magic_name__ = """resnet"""
__magic_name__ = """pixel_values"""
__magic_name__ = True
def UpperCAmelCase ( self , __magic_name__ ):
"""simple docstring"""
if isinstance(__magic_name__ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
elif isinstance(__magic_name__ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def UpperCAmelCase ( self , __magic_name__ , __magic_name__=False ):
"""simple docstring"""
if isinstance(__magic_name__ , __magic_name__ ):
A_ : Tuple = value
RESNET_START_DOCSTRING = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
RESNET_INPUTS_DOCSTRING = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    """The bare ResNet model outputting raw features without any specific head on top.""", RESNET_START_DOCSTRING, )
class __UpperCAmelCase( A__ ):
"""simple docstring"""
def __init__( self , __magic_name__ ):
"""simple docstring"""
super().__init__(__magic_name__ )
A_ : int = config
A_ : Any = ResNetEmbeddings(__magic_name__ )
A_ : str = ResNetEncoder(__magic_name__ )
A_ : int = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, )
def UpperCAmelCase ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None ):
"""simple docstring"""
A_ : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
A_ : Any = self.embedder(__magic_name__ )
A_ : Optional[int] = self.encoder(
__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ )
A_ : Tuple = encoder_outputs[0]
A_ : List[Any] = self.pooler(__magic_name__ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__magic_name__ , pooler_output=__magic_name__ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """, RESNET_START_DOCSTRING, )
class __UpperCAmelCase( A__ ):
"""simple docstring"""
def __init__( self , __magic_name__ ):
"""simple docstring"""
super().__init__(__magic_name__ )
A_ : Any = config.num_labels
A_ : str = ResNetModel(__magic_name__ )
# classification head
A_ : Dict = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
def UpperCAmelCase ( self , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , ):
"""simple docstring"""
A_ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
A_ : List[Any] = self.resnet(__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ )
A_ : Optional[int] = outputs.pooler_output if return_dict else outputs[1]
A_ : Optional[int] = self.classifier(__magic_name__ )
A_ : Optional[Any] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
A_ : Dict = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
A_ : Any = '''single_label_classification'''
else:
A_ : Optional[Any] = '''multi_label_classification'''
if self.config.problem_type == "regression":
A_ : List[str] = MSELoss()
if self.num_labels == 1:
A_ : Tuple = loss_fct(logits.squeeze() , labels.squeeze() )
else:
A_ : Optional[Any] = loss_fct(__magic_name__ , __magic_name__ )
elif self.config.problem_type == "single_label_classification":
A_ : List[str] = CrossEntropyLoss()
A_ : int = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
A_ : Union[str, Any] = BCEWithLogitsLoss()
A_ : Optional[Any] = loss_fct(__magic_name__ , __magic_name__ )
if not return_dict:
A_ : List[Any] = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__magic_name__ , logits=__magic_name__ , hidden_states=outputs.hidden_states )
@add_start_docstrings(
"""
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
""" , A__ , )
class __UpperCAmelCase( A__ , A__ ):
"""simple docstring"""
def __init__( self , __magic_name__ ):
"""simple docstring"""
super().__init__(__magic_name__ )
super()._init_backbone(__magic_name__ )
A_ : Any = [config.embedding_size] + config.hidden_sizes
A_ : int = ResNetEmbeddings(__magic_name__ )
A_ : Any = ResNetEncoder(__magic_name__ )
# initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING )
@replace_return_docstrings(output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC )
def UpperCAmelCase ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None ):
"""simple docstring"""
A_ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
A_ : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : str = self.embedder(__magic_name__ )
A_ : int = self.encoder(__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ )
A_ : Dict = outputs.hidden_states
A_ : Tuple = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
A_ : Dict = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=__magic_name__ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=__magic_name__ , )
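# Added illustration (mine, not part of the original file): a minimal, self-contained
# sketch of how the pooler plus Flatten/Linear classification head above collapse a
# feature map into logits. The channel and class counts are assumptions for the example.
import torch
from torch import nn

features = torch.randn(2, 512, 7, 7)                    # (batch, channels, height, width)
pooler = nn.AdaptiveAvgPool2d((1, 1))                   # -> (2, 512, 1, 1)
head = nn.Sequential(nn.Flatten(), nn.Linear(512, 10))  # -> (2, 10)
assert head(pooler(features)).shape == (2, 10)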
| 236 |
import heapq
def greedy_min_vertex_cover(graph: dict ) -> set[int]:
    """
    Greedy APX-Algorithm for MIN Vertex Cover
    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue: list[list] = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue , [-1 * len(value ), (key, value)] )
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue )[1][0]
        chosen_vertices.add(argmax )
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax )
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue )
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
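# Added illustration (mine, not from the original snippet): heapq only provides a
# min-heap, so the function above pushes -len(adjacency) to pop the highest-degree
# vertex first. A tiny demonstration of that trick:
max_heap: list[list] = []
for name, degree in [("a", 3), ("b", 1), ("c", 2)]:
    heapq.heappush(max_heap, [-degree, name])
assert heapq.heappop(max_heap) == [-3, "a"]  # largest degree comes out first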
| 236 | 1 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
def __init__( self :Union[str, Any] , __magic_name__ :Optional[int] , __magic_name__ :List[Any]=2 , __magic_name__ :Optional[int]=3 , __magic_name__ :List[str]=4 , __magic_name__ :List[Any]=2 , __magic_name__ :Union[str, Any]=7 , __magic_name__ :Dict=True , __magic_name__ :Tuple=True , __magic_name__ :str=True , __magic_name__ :List[str]=True , __magic_name__ :str=99 , __magic_name__ :Optional[Any]=36 , __magic_name__ :List[Any]=2 , __magic_name__ :Union[str, Any]=4 , __magic_name__ :Optional[int]=37 , __magic_name__ :List[Any]="gelu" , __magic_name__ :Tuple=0.1 , __magic_name__ :Optional[int]=0.1 , __magic_name__ :Tuple=512 , __magic_name__ :List[str]=16 , __magic_name__ :Optional[Any]=2 , __magic_name__ :List[Any]=0.02 , __magic_name__ :Dict=6 , __magic_name__ :int=6 , __magic_name__ :Optional[Any]=3 , __magic_name__ :int=4 , __magic_name__ :str=None , __magic_name__ :int=1000 , ):
'''simple docstring'''
a = parent
a = batch_size
a = num_channels
a = image_size
a = patch_size
a = is_training
a = use_input_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = coordinate_size
a = shape_size
a = num_labels
a = num_choices
a = scope
a = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
a = text_seq_length
a = (image_size // patch_size) ** 2 + 1
a = self.text_seq_length + self.image_seq_length
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
a = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
a = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
a = bbox[i, j, 3]
a = bbox[i, j, 1]
a = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
a = bbox[i, j, 2]
a = bbox[i, j, 0]
a = tmp_coordinate
a = tf.constant(__magic_name__ )
a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a = None
if self.use_input_mask:
a = random_attention_mask([self.batch_size, self.text_seq_length] )
a = None
if self.use_token_type_ids:
a = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
a = None
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
a = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCamelCase__ ( self :Dict , __magic_name__ :str , __magic_name__ :Tuple , __magic_name__ :Union[str, Any] , __magic_name__ :str , __magic_name__ :Optional[Any] , __magic_name__ :Any ):
'''simple docstring'''
a = TFLayoutLMvaModel(config=__magic_name__ )
# text + image
a = model(__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ )
a = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , training=__magic_name__ , )
a = model(__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
a = model(__magic_name__ , training=__magic_name__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
a = model({"""pixel_values""": pixel_values} , training=__magic_name__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowerCamelCase__ ( self :Tuple , __magic_name__ :List[str] , __magic_name__ :Dict , __magic_name__ :int , __magic_name__ :List[Any] , __magic_name__ :List[Any] , __magic_name__ :Optional[Any] , __magic_name__ :Optional[Any] ):
'''simple docstring'''
a = self.num_labels
a = TFLayoutLMvaForSequenceClassification(config=__magic_name__ )
a = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :List[Any] , __magic_name__ :Optional[int] , __magic_name__ :List[str] , __magic_name__ :str , __magic_name__ :Optional[int] , __magic_name__ :Dict , __magic_name__ :Union[str, Any] ):
'''simple docstring'''
a = self.num_labels
a = TFLayoutLMvaForTokenClassification(config=__magic_name__ )
a = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ , training=__magic_name__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowerCamelCase__ ( self :Dict , __magic_name__ :Optional[int] , __magic_name__ :Tuple , __magic_name__ :Optional[int] , __magic_name__ :Any , __magic_name__ :str , __magic_name__ :List[str] , __magic_name__ :Optional[int] ):
'''simple docstring'''
a = 2
a = TFLayoutLMvaForQuestionAnswering(config=__magic_name__ )
a = model(
__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , training=__magic_name__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = self.prepare_config_and_inputs()
((a) , (a) , (a) , (a) , (a) , (a) , (a) , (a)) = config_and_inputs
a = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
UpperCamelCase__ = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase__ = (
{'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def lowerCamelCase__ ( self :str , __magic_name__ :Optional[Any] , __magic_name__ :Optional[Any] , __magic_name__ :int , __magic_name__ :List[Any] , __magic_name__ :Optional[Any] ):
'''simple docstring'''
return True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(v , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
                inputs_dict['''labels'''] = tf.ones(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING ):
                inputs_dict['''start_positions'''] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
                inputs_dict['''end_positions'''] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ):
                inputs_dict['''labels'''] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING ):
                inputs_dict['''labels'''] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.int32 )
        return inputs_dict
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = TFLayoutLMvaModelTester(self )
a = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(__magic_name__ )
if getattr(__magic_name__ , """hf_compute_loss""" , __magic_name__ ):
# The number of elements in the loss should be the same as the number of elements in the label
a = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
a = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__magic_name__ )[0]
]
a = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
a = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
a = prepared_for_class.pop("""input_ids""" )
a = model(__magic_name__ , **__magic_name__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
a = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
a = prepared_for_class.pop("""input_ids""" )
if "labels" in prepared_for_class:
a = prepared_for_class["""labels"""].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
a = -100
a = tf.convert_to_tensor(__magic_name__ )
a = model(__magic_name__ , **__magic_name__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
a = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
a = model(__magic_name__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
a = self._prepare_for_class(inputs_dict.copy() , __magic_name__ , return_labels=__magic_name__ )
# Get keys that were added with the _prepare_for_class function
a = prepared_for_class.keys() - inputs_dict.keys()
a = inspect.signature(model.call ).parameters
a = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
a = {0: """input_ids"""}
for label_key in label_keys:
a = signature_names.index(__magic_name__ )
a = label_key
a = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
a = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
a = prepared_for_class[value]
a = tuple(__magic_name__ )
# Send to model
a = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
(
(
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
(
(
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) ,
) = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a = type
self.model_tester.create_and_check_model(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
(
(
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
(
(
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
(
(
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
@slow
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a = TFLayoutLMvaModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def prepare_img() -> Optional[Any]:
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=__magic_name__ ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" )
a = self.default_image_processor
a = prepare_img()
a = image_processor(images=__magic_name__ , return_tensors="""tf""" ).pixel_values
a = tf.constant([[1, 2]] )
a = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
a = model(input_ids=__magic_name__ , bbox=__magic_name__ , pixel_values=__magic_name__ , training=__magic_name__ )
# verify the logits
a = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , __magic_name__ )
a = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ) )
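# Added illustration (mine, not part of the test file): the bbox "legalization" loop in
# the model tester above only enforces x0 <= x1 and y0 <= y1 for each box. A vectorized
# equivalent on a made-up (x0, y0, x1, y1) box:
bad_bbox = np.array([[[5, 9, 2, 4]]])
xs = np.sort(bad_bbox[..., [0, 2]], axis=-1)  # order the x pair
ys = np.sort(bad_bbox[..., [1, 3]], axis=-1)  # order the y pair
legal_bbox = np.stack([xs[..., 0], ys[..., 0], xs[..., 1], ys[..., 1]], axis=-1)
assert (legal_bbox == np.array([[[2, 4, 5, 9]]])).all()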
| 468 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum ):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS )
class Text2TextGenerationPipeline(Pipeline ):
    return_name = '''generated'''
def __init__( self :Any , *__magic_name__ :Tuple , **__magic_name__ :Tuple ):
'''simple docstring'''
super().__init__(*__magic_name__ , **__magic_name__ )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Any=None , __magic_name__ :Optional[Any]=None , __magic_name__ :Any=None , __magic_name__ :List[str]=None , __magic_name__ :Tuple=None , __magic_name__ :str=None , **__magic_name__ :List[Any] , ):
'''simple docstring'''
a = {}
if truncation is not None:
a = truncation
a = generate_kwargs
a = {}
if return_tensors is not None and return_type is None:
a = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
a = return_type
if clean_up_tokenization_spaces is not None:
a = clean_up_tokenization_spaces
if stop_sequence is not None:
a = self.tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
if len(__magic_name__ ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
a = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :int , __magic_name__ :int , __magic_name__ :int ):
'''simple docstring'''
return True
def lowerCamelCase__ ( self :Dict , *__magic_name__ :Optional[int] , __magic_name__ :List[str] ):
'''simple docstring'''
a = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , __magic_name__ ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
a = ([prefix + arg for arg in args[0]],)
a = True
elif isinstance(args[0] , __magic_name__ ):
a = (prefix + args[0],)
a = False
else:
raise ValueError(
F' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`' )
a = self.tokenizer(*__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self :Tuple , *__magic_name__ :Any , **__magic_name__ :str ):
'''simple docstring'''
a = super().__call__(*__magic_name__ , **__magic_name__ )
if (
isinstance(args[0] , __magic_name__ )
and all(isinstance(__magic_name__ , __magic_name__ ) for el in args[0] )
and all(len(__magic_name__ ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def lowerCamelCase__ ( self :Dict , __magic_name__ :Optional[Any] , __magic_name__ :List[str]=TruncationStrategy.DO_NOT_TRUNCATE , **__magic_name__ :Any ):
'''simple docstring'''
a = self._parse_and_tokenize(__magic_name__ , truncation=__magic_name__ , **__magic_name__ )
return inputs
def lowerCamelCase__ ( self :Any , __magic_name__ :int , **__magic_name__ :int ):
'''simple docstring'''
if self.framework == "pt":
a , a = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
a , a = tf.shape(model_inputs["""input_ids"""] ).numpy()
a = generate_kwargs.get("""min_length""" , self.model.config.min_length )
a = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(__magic_name__ , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
a = self.model.generate(**__magic_name__ , **__magic_name__ )
a = output_ids.shape[0]
if self.framework == "pt":
a = output_ids.reshape(__magic_name__ , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
a = tf.reshape(__magic_name__ , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def lowerCamelCase__ ( self :Optional[int] , __magic_name__ :Dict , __magic_name__ :Any=ReturnType.TEXT , __magic_name__ :int=False ):
'''simple docstring'''
a = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
a = {F'{self.return_name}_token_ids': output_ids}
elif return_type == ReturnType.TEXT:
a = {
F'{self.return_name}_text': self.tokenizer.decode(
__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ , )
}
records.append(__magic_name__ )
return records
@add_end_docstrings(PIPELINE_INIT_ARGS )
class SummarizationPipeline(Text2TextGenerationPipeline ):
    return_name = '''summary'''
def __call__( self :Any , *__magic_name__ :List[str] , **__magic_name__ :Optional[int] ):
'''simple docstring'''
return super().__call__(*__magic_name__ , **__magic_name__ )
def lowerCamelCase__ ( self :Any , __magic_name__ :int , __magic_name__ :int , __magic_name__ :int ):
'''simple docstring'''
if max_length < min_length:
logger.warning(F'Your min_length={min_length} must be inferior than your max_length={max_length}.' )
if input_length < max_length:
logger.warning(
F'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
F'consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})' )
@add_end_docstrings(PIPELINE_INIT_ARGS )
class TranslationPipeline(Text2TextGenerationPipeline ):
    return_name = '''translation'''
def lowerCamelCase__ ( self :List[Any] , __magic_name__ :int , __magic_name__ :int , __magic_name__ :int ):
'''simple docstring'''
if input_length > 0.9 * max_length:
logger.warning(
F'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def lowerCamelCase__ ( self :str , *__magic_name__ :Union[str, Any] , __magic_name__ :Any=TruncationStrategy.DO_NOT_TRUNCATE , __magic_name__ :Optional[Any]=None , __magic_name__ :List[str]=None ):
'''simple docstring'''
if getattr(self.tokenizer , """_build_translation_inputs""" , __magic_name__ ):
return self.tokenizer._build_translation_inputs(
*__magic_name__ , return_tensors=self.framework , truncation=__magic_name__ , src_lang=__magic_name__ , tgt_lang=__magic_name__ )
else:
return super()._parse_and_tokenize(*__magic_name__ , truncation=__magic_name__ )
def lowerCamelCase__ ( self :int , __magic_name__ :List[str]=None , __magic_name__ :Union[str, Any]=None , **__magic_name__ :Optional[int] ):
'''simple docstring'''
a , a , a = super()._sanitize_parameters(**__magic_name__ )
if src_lang is not None:
a = src_lang
if tgt_lang is not None:
a = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
a = kwargs.get("""task""" , self.task )
a = task.split("""_""" )
if task and len(__magic_name__ ) == 4:
# translation, XX, to YY
a = items[1]
a = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self :Optional[Any] , *__magic_name__ :Any , **__magic_name__ :str ):
'''simple docstring'''
return super().__call__(*__magic_name__ , **__magic_name__ )
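# Added usage sketch (mine): how these pipelines are typically reached through the
# standard `transformers.pipeline` factory. Left commented because it downloads default
# weights on first run; the input texts are example values, not taken from this file.
# from transformers import pipeline
# summarizer = pipeline("summarization")
# print(summarizer("A very long article ...", max_length=60, min_length=10))
# translator = pipeline("translation_en_to_fr")
# print(translator("How old are you?"))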
| 468 | 1 |
'''simple docstring'''
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*lowercase_ : Any ):
    # lock this script's own file so prints from concurrent ranks don't interleave
    with open(__file__ , """r""" ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*lowercase_ )
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN )
local_rank = int(os.environ['''LOCAL_RANK'''])
torch.cuda.set_device(local_rank)
device = torch.device('''cuda''', local_rank)
hostname = socket.gethostname()
gpu = f'''[{hostname}-{local_rank}]'''
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(f'''{gpu} is broken''')
raise
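# Added note (mine): this diagnostic is typically launched with the standard torch
# launcher, e.g. for one node with two GPUs (adjust counts for your setup):
#   python -m torch.distributed.run --nproc_per_node 2 torch-distributed-gpu-test.py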
| 720 |
'''simple docstring'''
import os
def solution():
    file_path = os.path.join(os.path.dirname(__file__ ) , """num.txt""" )
    with open(file_path ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
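# Added illustration (mine): the answer keeps only the first ten digits of the sum via
# string slicing, e.g.:
assert str(sum([5_000_000_000, 5_000_000_123]))[:10] == "1000000012"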
| 653 | 0 |
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(1_0):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 1_1))

    linked_list.insert_head(0)
    linked_list.insert_tail(1_1)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 1_2))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 1_0
    assert linked_list.delete_tail() == 1_1
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 1_0))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        1_0_0,
        Node(7_7_3_4_5_1_1_2),
        "dlrow olleH",
        7,
        5_5_5_5,
        0,
        -1_92.5_55_55,
        "Hello, world!",
        77.9,
        Node(1_0),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(1_0)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head " ).strip() )
    linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
    print("\nPrint list:" )
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
    linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
    print("\nPrint list:" )
    linked_list.print_list()
    print("\nDelete head" )
    linked_list.delete_head()
    print("Delete tail" )
    linked_list.delete_tail()
    print("\nPrint list:" )
    linked_list.print_list()
    print("\nReverse linked list" )
    linked_list.reverse()
    print("\nPrint list:" )
    linked_list.print_list()
    print("\nString representation of linked list:" )
    print(linked_list )
    print("\nReading/changing Node data using indexing:" )
    print(f"Element at Position 1: {linked_list[1]}" )
    linked_list[1] = input("Enter New Value: " ).strip()
    print("New list:" )
    print(linked_list )
    print(f"length of linked_list is : {len(linked_list )}" )
if __name__ == "__main__":
main()
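# Added illustration (mine, not part of the module above): the same three-pointer
# reversal that LinkedList.reverse() performs, traced on a tiny ad-hoc node type.
class _N:
    def __init__(self, data, nxt=None):
        self.data, self.next = data, nxt

_head = _N(1, _N(2, _N(3)))
_prev, _current = None, _head
while _current:
    _nxt = _current.next    # remember where to go next
    _current.next = _prev   # point the link backwards
    _prev = _current        # advance prev
    _current = _nxt         # advance current
_head = _prev
assert [_head.data, _head.next.data, _head.next.next.data] == [3, 2, 1]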
| 308 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowercase_ ( unittest.TestCase ):
def _lowerCAmelCase ( self : int ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image( self ):
        batch_size = 1
        num_channels = 3
        sizes = (3_2, 3_2)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image
@property
    def dummy_cond_unet( self ):
        torch.manual_seed(0 )
        model = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
        return model
@property
    def dummy_vae( self ):
        torch.manual_seed(0 )
        model = AutoencoderKL(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        return model
@property
    def dummy_text_encoder( self ):
        torch.manual_seed(0 )
        config = RobertaSeriesConfig(
            hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
        return RobertaSeriesModelWithTransformation(config )
@property
    def dummy_extractor( self ):
        def extract(*args , **kwargs ):
            class Out:
                def __init__( self ):
                    self.pixel_values = torch.ones([0] )

                def to( self , device ):
                    self.pixel_values.to(device )
                    return self

            return Out()

        return extract
def _lowerCAmelCase ( self : Dict ):
lowerCAmelCase__ : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : List[str] = self.dummy_cond_unet
lowerCAmelCase__ : Optional[Any] = PNDMScheduler(skip_prk_steps=_lowercase )
lowerCAmelCase__ : Tuple = self.dummy_vae
lowerCAmelCase__ : int = self.dummy_text_encoder
lowerCAmelCase__ : Any = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowerCAmelCase__ : Optional[int] = 7_7
lowerCAmelCase__ : str = self.dummy_image.to(_lowercase )
lowerCAmelCase__ : Optional[Any] = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowerCAmelCase__ : Tuple = AltDiffusionImgaImgPipeline(
unet=_lowercase , scheduler=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , safety_checker=_lowercase , feature_extractor=self.dummy_extractor , )
lowerCAmelCase__ : Tuple = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowercase )
lowerCAmelCase__ : str = alt_pipe.to(_lowercase )
alt_pipe.set_progress_bar_config(disable=_lowercase )
lowerCAmelCase__ : List[str] = "A painting of a squirrel eating a burger"
lowerCAmelCase__ : str = torch.Generator(device=_lowercase ).manual_seed(0 )
lowerCAmelCase__ : Optional[Any] = alt_pipe(
[prompt] , generator=_lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=_lowercase , )
lowerCAmelCase__ : Tuple = output.images
lowerCAmelCase__ : Dict = torch.Generator(device=_lowercase ).manual_seed(0 )
lowerCAmelCase__ : List[str] = alt_pipe(
[prompt] , generator=_lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=_lowercase , return_dict=_lowercase , )[0]
lowerCAmelCase__ : str = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCAmelCase__ : List[str] = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _lowerCAmelCase ( self : Tuple ):
lowerCAmelCase__ : Optional[Any] = self.dummy_cond_unet
lowerCAmelCase__ : List[Any] = PNDMScheduler(skip_prk_steps=_lowercase )
lowerCAmelCase__ : Optional[int] = self.dummy_vae
lowerCAmelCase__ : Tuple = self.dummy_text_encoder
lowerCAmelCase__ : str = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowerCAmelCase__ : List[Any] = 7_7
lowerCAmelCase__ : Optional[int] = self.dummy_image.to(_lowercase )
# put models in fp16
lowerCAmelCase__ : Optional[int] = unet.half()
lowerCAmelCase__ : Optional[int] = vae.half()
lowerCAmelCase__ : int = bert.half()
# make sure here that pndm scheduler skips prk
lowerCAmelCase__ : str = AltDiffusionImgaImgPipeline(
unet=_lowercase , scheduler=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , safety_checker=_lowercase , feature_extractor=self.dummy_extractor , )
lowerCAmelCase__ : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowercase )
lowerCAmelCase__ : Dict = alt_pipe.to(_lowercase )
alt_pipe.set_progress_bar_config(disable=_lowercase )
lowerCAmelCase__ : Union[str, Any] = "A painting of a squirrel eating a burger"
lowerCAmelCase__ : Any = torch.manual_seed(0 )
lowerCAmelCase__ : Union[str, Any] = alt_pipe(
[prompt] , generator=_lowercase , num_inference_steps=2 , output_type="np" , image=_lowercase , ).images
assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _lowerCAmelCase ( self : str ):
lowerCAmelCase__ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCAmelCase__ : List[Any] = init_image.resize((7_6_0, 5_0_4) )
lowerCAmelCase__ : str = "BAAI/AltDiffusion"
lowerCAmelCase__ : Tuple = AltDiffusionImgaImgPipeline.from_pretrained(
_lowercase , safety_checker=_lowercase , )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
lowerCAmelCase__ : List[Any] = "A fantasy landscape, trending on artstation"
lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(0 )
lowerCAmelCase__ : Any = pipe(
prompt=_lowercase , image=_lowercase , strength=0.75 , guidance_scale=7.5 , generator=_lowercase , output_type="np" , )
lowerCAmelCase__ : Optional[Any] = output.images[0]
lowerCAmelCase__ : List[Any] = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 7_6_0, 3)
lowerCAmelCase__ : Tuple = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
def _lowerCAmelCase ( self : Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self : Union[str, Any] ):
lowerCAmelCase__ : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
lowerCAmelCase__ : Union[str, Any] = init_image.resize((7_6_8, 5_1_2) )
lowerCAmelCase__ : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
lowerCAmelCase__ : Dict = "BAAI/AltDiffusion"
lowerCAmelCase__ : str = AltDiffusionImgaImgPipeline.from_pretrained(
_lowercase , safety_checker=_lowercase , )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
lowerCAmelCase__ : Any = "A fantasy landscape, trending on artstation"
lowerCAmelCase__ : List[str] = torch.manual_seed(0 )
lowerCAmelCase__ : Tuple = pipe(
prompt=_lowercase , image=_lowercase , strength=0.75 , guidance_scale=7.5 , generator=_lowercase , output_type="np" , )
lowerCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (5_1_2, 7_6_8, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
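# Added illustration (mine, made-up values): the slice-based assertions in these tests
# compare a small corner of the generated image to frozen reference numbers within a
# tolerance, rather than the full array.
_img = np.full((32, 32, 3), 0.5)
_expected_slice = np.full(9, 0.5)
assert np.abs(_img[-3:, -3:, -1].flatten() - _expected_slice).max() < 1e-2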
| 308 | 1 |
import re
def __SCREAMING_SNAKE_CASE ( dna: str ) -> str:
    if len(re.findall("[ATCG]" , dna ) ) != len(dna ):
        raise ValueError("Invalid Strand" )
    return dna.translate(dna.maketrans("ATCG" , "TAGC" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
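# Added check (mine): each base maps to its Watson-Crick complement, A<->T and C<->G.
assert __SCREAMING_SNAKE_CASE("AAACCC") == "TTTGGG"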
| 708 |
def __SCREAMING_SNAKE_CASE ( string_a: str , string_b: str ) -> int:
    if len(string_a ) != len(string_b ):
        raise ValueError("String lengths must match!" )
    count = 0
    for char_a, char_b in zip(string_a , string_b ):
        if char_a != char_b:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
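# Added check (mine): the classic example pair differs in exactly three positions.
assert __SCREAMING_SNAKE_CASE("karolin", "kathrin") == 3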
| 467 | 0 |
'''simple docstring'''
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
class lowerCAmelCase__ (PreTrainedModel ):
    config_class = CLIPConfig
    _no_split_modules = ['''CLIPEncoderLayer''']

    def __init__( self , config: CLIPConfig ) -> None:
        super().__init__(config )
        self.vision_model = CLIPVisionModelWithProjection(config.vision_config )
        self.p_head = nn.Linear(config.vision_config.projection_dim , 1 )
        self.w_head = nn.Linear(config.vision_config.projection_dim , 1 )

    @torch.no_grad()
    def forward( self , clip_input , images , p_threshold=0.5 , w_threshold=0.5 ) -> Any:
        image_embeds = self.vision_model(clip_input )[0]

        nsfw_detected = self.p_head(image_embeds )
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected ):
            logger.warning(
                '''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
                ''' Try again with a different prompt and/or seed.''' )
        for idx, nsfw_detected_ in enumerate(nsfw_detected ):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape )

        watermark_detected = self.w_head(image_embeds )
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected ):
            logger.warning(
                '''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
                ''' Try again with a different prompt and/or seed.''' )
        for idx, watermark_detected_ in enumerate(watermark_detected ):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape )

        return images, nsfw_detected, watermark_detected
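# Added illustration (mine, made-up scores; reuses the torch import above): how the
# flatten/threshold/tolist chain in forward() turns per-image head outputs into flags.
_scores = torch.tensor([[0.1], [0.9]])  # shape (batch, 1), like a head output
assert (_scores.flatten() > 0.5).tolist() == [False, True]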
| 501 |
from math import isqrt
def is_prime(number: int ) -> bool:
    return all(number % divisor != 0 for divisor in range(2, isqrt(number ) + 1 ) )
def solution(max_prime: int = 10**6 ) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate )
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
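# Added check (mine): the candidates 7, 19, 37, ... generated by solution() are the
# differences of consecutive cubes, (k + 1)**3 - k**3 = 3*k*k + 3*k + 1.
assert [(k + 1) ** 3 - k ** 3 for k in range(1, 4)] == [7, 19, 37]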
| 699 | 0 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
def __init__(self : List[Any] , _snake_case : Any , _snake_case : Optional[int]=13 , _snake_case : int=7 , _snake_case : int=True , _snake_case : List[str]=True , _snake_case : str=True , _snake_case : List[Any]=True , _snake_case : Optional[int]=True , _snake_case : Any=False , _snake_case : Optional[int]=False , _snake_case : Optional[Any]=False , _snake_case : Tuple=2 , _snake_case : Dict=99 , _snake_case : Union[str, Any]=0 , _snake_case : int=32 , _snake_case : List[str]=5 , _snake_case : Dict=4 , _snake_case : List[str]=0.1 , _snake_case : Optional[Any]=0.1 , _snake_case : Optional[Any]=512 , _snake_case : Any=2 , _snake_case : Dict=0.02 , _snake_case : Tuple=2 , _snake_case : int=4 , _snake_case : Any="last" , _snake_case : str=True , _snake_case : Tuple=None , _snake_case : Any=0 , ) -> Any:
"""simple docstring"""
lowerCamelCase_ : Tuple = parent
lowerCamelCase_ : Tuple = batch_size
lowerCamelCase_ : Dict = seq_length
lowerCamelCase_ : str = is_training
lowerCamelCase_ : Union[str, Any] = use_input_lengths
lowerCamelCase_ : str = use_token_type_ids
lowerCamelCase_ : Tuple = use_labels
lowerCamelCase_ : Tuple = gelu_activation
lowerCamelCase_ : Any = sinusoidal_embeddings
lowerCamelCase_ : int = causal
lowerCamelCase_ : Dict = asm
lowerCamelCase_ : List[Any] = n_langs
lowerCamelCase_ : Optional[Any] = vocab_size
lowerCamelCase_ : List[str] = n_special
lowerCamelCase_ : Optional[int] = hidden_size
lowerCamelCase_ : Tuple = num_hidden_layers
lowerCamelCase_ : List[Any] = num_attention_heads
lowerCamelCase_ : Optional[int] = hidden_dropout_prob
lowerCamelCase_ : str = attention_probs_dropout_prob
lowerCamelCase_ : List[str] = max_position_embeddings
lowerCamelCase_ : List[str] = type_sequence_label_size
lowerCamelCase_ : int = initializer_range
lowerCamelCase_ : Dict = num_labels
lowerCamelCase_ : Dict = num_choices
lowerCamelCase_ : Union[str, Any] = summary_type
lowerCamelCase_ : str = use_proj
lowerCamelCase_ : str = scope
lowerCamelCase_ : Dict = bos_token_id
def UpperCAmelCase_ (self : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ : str = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ : Union[str, Any] = None
if self.use_input_lengths:
lowerCamelCase_ : Optional[int] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCamelCase_ : List[Any] = None
if self.use_token_type_ids:
lowerCamelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowerCamelCase_ : Tuple = None
lowerCamelCase_ : Union[str, Any] = None
lowerCamelCase_ : Any = None
if self.use_labels:
lowerCamelCase_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ : Optional[int] = ids_tensor([self.batch_size] , 2 ).float()
lowerCamelCase_ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ : int = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase_ (self : Dict ) -> Dict:
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
    def create_and_check_xlm_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions],
                [expected_shape] * len(iter_attentions),
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 144 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
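# Quick illustrative check (not part of the official script): with gold answer
# "the cat sat" and prediction "a cat sat down", normalization drops the articles,
# so the shared tokens are {"cat", "sat"}: precision 2/3, recall 2/2, F1 = 0.8.
assert abs(compute_f1("the cat sat", "a cat sat down") - 0.8) < 1e-9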
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
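# Note (descriptive): the sweep above visits qids in order of increasing no-answer
# probability, so `cur_score` is the score obtained if every qid seen so far is
# answered and the rest are predicted as no-answer; the best threshold therefore
# falls out of a single pass over the sorted qids.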
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
| 144 | 1 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
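# Minimal usage sketch (checkpoint and dataset names below are illustrative, not
# fixed by this script):
#
#   tokenizer = AutoTokenizer.from_pretrained("gpt2")
#   stream = load_dataset("codeparrot/codeparrot-clean-valid", split="train", streaming=True)
#   constant_ds = ConstantLengthDataset(tokenizer, stream, seq_length=1024)
#   first = next(iter(constant_ds))  # torch.LongTensor of shape (1024,)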
def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))

        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
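# Note (illustrative): perplexity = exp(mean cross-entropy loss), so a loss of 2.0
# corresponds to a perplexity of about 7.39; the OverflowError guard above handles
# the degenerate case where the loss is too large for exp().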
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 333 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={
"huggingface/informer-tourism-monthly": (
"https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
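# Minimal usage sketch (values below are illustrative):
#
#   config = InformerConfig(prediction_length=24, context_length=48, input_size=1)
#   config.hidden_size  # -> 64, aliased to `d_model` via `attribute_map`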
| 333 | 1 |
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # suppress TensorFlow's C++ log spam
print('''Python version:''', sys.version)
print('''transformers version:''', transformers.__version__)
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
print('''NCCL version:''', torch.cuda.nccl.version())
except ImportError:
print('''Torch version:''', None)
try:
import deepspeed
print('''DeepSpeed version:''', deepspeed.__version__)
except ImportError:
print('''DeepSpeed version:''', None)
try:
import tensorflow as tf
print('''TensorFlow version:''', tf.__version__)
print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU''')))
print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU''')))
except ImportError:
print('''TensorFlow version:''', None)
| 24 |
"""simple docstring"""
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check the substring-divisibility property of Project Euler 43."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
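# Worked example: 1406357289 passes every test above — d4 = 6 is even, 0+6+3 is a
# multiple of 3, d6 = 5, and 357/7, 572/11, 728/13, 289/17 all divide evenly.
assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))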
def solution(n: int = 10) -> int:
    """Sum all 0-9 pandigital numbers with the substring-divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
print(F'''{solution() = }''')
| 24 | 1 |
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """BigBird QA module with an extra 5-way answer-category head on the pooled output."""

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        # one-hot cross entropy, optionally reduced (e.g. with jnp.mean)
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    seed: int = 3
    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
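# Note (descriptive): `jax.lax.pmean` averages both the loss and the gradients
# across the "batch" pmap axis, so every device applies the same synchronized
# parameter update.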
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr_schedule = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr_schedule


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
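# Sketch of the resulting schedule (the values below mirror the defaults in Args
# above and are illustrative): linear warmup from init_lr to lr over warmup_steps,
# then linear decay towards ~0 (1e-7) for the remaining steps.
#
#   tx, lr_fn = build_tx(lr=3e-5, init_lr=0.0, warmup_steps=20_000,
#                        num_train_steps=100_000, weight_decay=0.0095)
#   lr_fn(0)       # ~0.0
#   lr_fn(20_000)  # ~3e-5, the peak learning rate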
| 617 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 286 | 0 |
import os
def solution():
    """Compute the total of all name scores in p022_names.txt (Project Euler 22)."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0

    return total_score
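# Worked example from the problem statement: "COLIN" has alphabetical value
# 3 + 15 + 12 + 9 + 14 = 53; as the 938th name in sorted order it contributes
# 938 * 53 = 49714 to the total.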
if __name__ == "__main__":
print(solution())
| 5 |
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """Return (most likely shift, chi-squared value, decoded text) for a Caesar cipher."""
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
        frequencies = {
"a": 0.08_497,
"b": 0.01_492,
"c": 0.02_202,
"d": 0.04_253,
"e": 0.11_162,
"f": 0.02_228,
"g": 0.02_015,
"h": 0.06_094,
"i": 0.07_546,
"j": 0.00_153,
"k": 0.01_292,
"l": 0.04_025,
"m": 0.02_406,
"n": 0.06_749,
"o": 0.07_507,
"p": 0.01_929,
"q": 0.00_095,
"r": 0.07_587,
"s": 0.06_327,
"t": 0.09_356,
"u": 0.02_758,
"v": 0.00_978,
"w": 0.02_560,
"x": 0.00_150,
"y": 0.01_994,
"z": 0.00_077,
}
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
        chi_squared_statistic = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )
# Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
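# Illustrative usage ("olssv dvysk" is "hello world" Caesar-shifted by 7):
#
#   shift, chi_squared, decoded = decrypt_caesar_with_chi_squared("olssv dvysk")
#   # shift == 7 and decoded == "hello world", assuming the default English
#   # letter frequencies above.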
| 5 | 1 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether `cp` is the codepoint of a CJK character."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True

    return False


def is_chinese(word: str):
    # word like "180", "身高" or "神"
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: list):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: list, chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
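# Illustrative example: with chinese_word_set = {"中国"}, the tokens
# ["中", "国", "人"] become ["中", "##国", "人"], i.e. "国" is marked as the
# continuation of a whole word for whole-word masking.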
def prepare_ref(lines: list, ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
    args = parser.parse_args()
main(args) | 8 |
"""simple docstring"""
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
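# Note: this variant swaps two uniformly random positions len(data) times and
# mutates `data` in place. Strictly speaking, the textbook Fisher-Yates walks i
# from the end and swaps with a random j <= i, which is what guarantees a
# uniform permutation; the random-pair variant above does not.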
if __name__ == "__main__":
lowerCamelCase__ = [0, 1, 2, 3, 4, 5, 6, 7]
lowerCamelCase__ = ["python", "says", "hello", "!"]
print("Fisher-Yates Shuffle:")
print("List", integers, strings)
print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 624 | 0 |
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a generated token sequence into an ordered JSON format."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
    @property
    def feature_extractor_class( self ):
        """Deprecated alias for ``image_processor_class``."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        """Deprecated alias for ``image_processor``."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
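# Hedged usage sketch (assumes a constructed DonutProcessor-style instance named
# `processor`; the token sequence below is illustrative, not from the original file):
#   sequence = "<s_menu><s_name>Latte</s_name><s_price>4.50</s_price></s_menu>"
#   processor.token2json(sequence)
#   # -> {"menu": {"name": "Latte", "price": "4.50"}}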
| 715 |
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=13_37 , num_examples=42 , dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train" , num_bytes=13_37 , num_examples=42 )} ),
SplitDict({"train": SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list( split_dict: SplitDict ) -> None:
    '''The YAML dump of a SplitDict should round-trip back to an equal SplitDict.'''
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list ) == len(split_dict )
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info" , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name="my_dataset" )] )
def test_split_dict_asdict_has_dataset_name( split_info: SplitInfo ) -> None:
    '''The dict form should keep the (deprecated) dataset_name field.'''
    split_dict_asdict = asdict(SplitDict({"train": split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 157 | 0 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card (model_card_dir : Path , src_lang : str , tgt_lang : str ):
    texts = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
    pair = F'''{src_lang}-{tgt_lang}'''
    readme = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir , exist_ok=True )
    path = os.path.join(model_card_dir , 'README.md' )
    print(F'''Generating {path}''' )
    with open(path , 'w' , encoding='utf-8' ) as f:
        f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
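# Hedged sketch of invoking the generator for a single pair outside the loop above
# (the target directory is illustrative):
#   write_model_card(Path("model_cards/facebook/wmt19-en-de"), src_lang="en", tgt_lang="de")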
| 478 |
def binary_xor (a : int , b : int ) -> str:
    '''Return the XOR of two non-negative integers as a "0b"-prefixed binary string.'''
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must not be negative' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
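# Hand-checked examples of the function above:
#   binary_xor(25, 32)  # -> "0b111001"  (011001 ^ 100000)
#   binary_xor(37, 50)  # -> "0b010111"  (100101 ^ 110010)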
| 478 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
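# Hedged note on the pattern above: _LazyModule defers the torch-backed imports
# until an attribute is first accessed, so e.g.
#   from transformers.models.focalnet import FocalNetConfig
# pays only for the configuration module, not the modeling code.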
| 548 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__( self , parent , batch_size=2 , is_training=True , use_auxiliary_loss=False , num_queries=10 , num_channels=3 , min_size=32 * 4 , max_size=32 * 6 , num_labels=4 , mask_feature_size=32 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size
def _lowerCamelCase ( self ):
UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__lowerCAmelCase )
UpperCamelCase__ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase )
UpperCamelCase__ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5
).float()
UpperCamelCase__ = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long()
UpperCamelCase__ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _lowerCamelCase ( self ):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = output.encoder_hidden_states
UpperCamelCase__ = output.pixel_decoder_hidden_states
UpperCamelCase__ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ):
with torch.no_grad():
UpperCamelCase__ = MaskFormerModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
UpperCamelCase__ = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
def comm_check_on_output(__lowerCAmelCase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCamelCase__ = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
UpperCamelCase__ = model(__lowerCAmelCase )
comm_check_on_output(__lowerCAmelCase )
UpperCamelCase__ = model(
pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
comm_check_on_output(__lowerCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskFormerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp( self ):
        self.model_tester = MaskFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskFormerConfig , has_text_modality=False )
def _lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase )
@unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
def _lowerCamelCase ( self ):
pass
@unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
def _lowerCamelCase ( self ):
pass
@unittest.skip(reason="""MaskFormer is not a generative model""" )
def _lowerCamelCase ( self ):
pass
@unittest.skip(reason="""MaskFormer does not use token embeddings""" )
def _lowerCamelCase ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def _lowerCamelCase ( self ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _lowerCamelCase ( self ):
pass
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(__lowerCAmelCase )
UpperCamelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
@slow
def _lowerCamelCase ( self ):
for model_name in ["facebook/maskformer-swin-small-coco"]:
UpperCamelCase__ = MaskFormerModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = (self.model_tester.min_size,) * 2
UpperCamelCase__ = {
"""pixel_values""": torch.randn((2, 3, *size) , device=__lowerCAmelCase ),
"""mask_labels""": torch.randn((2, 10, *size) , device=__lowerCAmelCase ),
"""class_labels""": torch.zeros(2 , 10 , device=__lowerCAmelCase ).long(),
}
UpperCamelCase__ = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowerCAmelCase )
UpperCamelCase__ = model(**__lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(__lowerCAmelCase ).to(__lowerCAmelCase )
UpperCamelCase__ = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase )
self.assertTrue(outputs.attentions is not None )
def _lowerCamelCase ( self ):
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
UpperCamelCase__ = self.all_model_classes[1]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase__ = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
UpperCamelCase__ = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss
loss.backward()
def _lowerCamelCase ( self ):
# only MaskFormerForInstanceSegmentation has the loss
UpperCamelCase__ = self.all_model_classes[1]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
UpperCamelCase__ = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
UpperCamelCase__ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCamelCase__ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
UpperCamelCase__ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCamelCase__ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__lowerCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
UpperCamelCase__ = 1e-4
def prepare_img():
    """Load the standard COCO test image used by the integration tests."""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self ):
return (
MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
if is_vision_available()
else None
)
def _lowerCamelCase ( self ):
UpperCamelCase__ = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(__lowerCAmelCase )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""pt""" ).to(__lowerCAmelCase )
UpperCamelCase__ = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
UpperCamelCase__ = model(**__lowerCAmelCase )
UpperCamelCase__ = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
UpperCamelCase__ = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
UpperCamelCase__ = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def _lowerCamelCase ( self ):
UpperCamelCase__ = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(__lowerCAmelCase )
.eval()
)
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""pt""" ).to(__lowerCAmelCase )
UpperCamelCase__ = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
UpperCamelCase__ = model(**__lowerCAmelCase )
# masks_queries_logits
UpperCamelCase__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCamelCase__ = [
[-1.373_7124, -1.772_4937, -1.936_4233],
[-1.597_7281, -1.986_7939, -2.152_3695],
[-1.579_5398, -1.926_9832, -2.09_3942],
]
UpperCamelCase__ = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
# class_queries_logits
UpperCamelCase__ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCamelCase__ = torch.tensor(
[
[1.6512E00, -5.2572E00, -3.3519E00],
[3.6169E-02, -5.9025E00, -2.9313E00],
[1.0766E-04, -7.7630E00, -5.1263E00],
] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def _lowerCamelCase ( self ):
UpperCamelCase__ = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
.to(__lowerCAmelCase )
.eval()
)
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""pt""" ).to(__lowerCAmelCase )
UpperCamelCase__ = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
UpperCamelCase__ = model(**__lowerCAmelCase )
# masks_queries_logits
UpperCamelCase__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCamelCase__ = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
UpperCamelCase__ = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
# class_queries_logits
UpperCamelCase__ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCamelCase__ = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def _lowerCamelCase ( self ):
UpperCamelCase__ = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(__lowerCAmelCase )
.eval()
)
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , )
UpperCamelCase__ = inputs["""pixel_values"""].to(__lowerCAmelCase )
UpperCamelCase__ = [el.to(__lowerCAmelCase ) for el in inputs["""mask_labels"""]]
UpperCamelCase__ = [el.to(__lowerCAmelCase ) for el in inputs["""class_labels"""]]
with torch.no_grad():
UpperCamelCase__ = model(**__lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
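# Hedged minimal-inference sketch mirroring the integration tests above
# (checkpoint name taken from the tests; the image path is illustrative):
#   processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
#   model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
#   inputs = processor(images=Image.open("cats.png"), return_tensors="pt")
#   with torch.no_grad():
#       outputs = model(**inputs)  # masks_queries_logits and class_queries_logits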
| 548 | 1 |
import math
def perfect_square( num ):
    '''Check if a number is a perfect square using the built-in square root.'''
    return math.sqrt(num ) * math.sqrt(num ) == num
def perfect_square_binary_search( n ):
    '''Check if a number is a perfect square using binary search.'''
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
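# Hand-checked examples for both implementations above:
#   perfect_square(9)  # -> True
#   perfect_square(10)  # -> False
#   perfect_square_binary_search(16)  # -> True
#   perfect_square_binary_search(17)  # -> False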
| 441 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    '''Parse the launcher's own flags plus everything destined for the training script.'''
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores" , type=int , default=1 , help="Number of TPU cores to use (1 or 8)." )
    # positional
    parser.add_argument(
        "training_script" , type=str , help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ) , )
    # rest from the training program
    parser.add_argument("training_script_args" , nargs=REMAINDER )
    return parser.parse_args()
def main():
    '''Spawn the training script's ``_mp_fn`` across the requested TPU cores.'''
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
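# Hedged CLI sketch (flag names as defined in parse_args above; the training
# script and its arguments are illustrative):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased --do_train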
| 441 | 1 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , hidden_sizes=[32, 64, 1_28] , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1e-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , out_features=["stage1", "stage2"] , out_indices=[1, 2] , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ = None
if self.use_labels:
snake_case__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self:str ):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:Any , _a:Tuple , _a:Optional[Any] ):
snake_case__ = FocalNetModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case__ = model(__UpperCamelCase )
snake_case__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
snake_case__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:List[str] , _a:Union[str, Any] , _a:Tuple ):
snake_case__ = FocalNetBackbone(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case__ = model(__UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
snake_case__ = None
snake_case__ = FocalNetBackbone(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case__ = model(__UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:str , _a:List[Any] , _a:List[str] ):
snake_case__ = FocalNetForMaskedImageModeling(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case__ = model(__UpperCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
snake_case__ = 1
snake_case__ = FocalNetForMaskedImageModeling(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case__ = model(__UpperCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE__ ( self:str , _a:Union[str, Any] , _a:List[Any] , _a:Optional[int] ):
snake_case__ = self.type_sequence_label_size
snake_case__ = FocalNetForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case__ = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case__ = 1
snake_case__ = FocalNetForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case__ = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ = config_and_inputs
snake_case__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = FocalNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FocalNetConfig , embed_dim=37 , has_text_modality=False )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
return
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@unittest.skip(reason='''FocalNet does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
pass
@unittest.skip(reason='''FocalNet does not use feedforward chunking''' )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
pass
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
snake_case__ = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
snake_case__ = model_class(__UpperCamelCase )
snake_case__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ = [*signature.parameters.keys()]
snake_case__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:Union[str, Any] , _a:List[str] , _a:List[str] , _a:List[Any] ):
snake_case__ = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
snake_case__ = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
snake_case__ = outputs.hidden_states
snake_case__ = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
# FocalNet has a different seq_length
snake_case__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
snake_case__ = outputs.reshaped_hidden_states
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
snake_case__ , snake_case__ , snake_case__ , snake_case__ = reshaped_hidden_states[0].shape
snake_case__ = (
reshaped_hidden_states[0].view(__UpperCamelCase , __UpperCamelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
snake_case__ = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = 3
snake_case__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
snake_case__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
snake_case__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
snake_case__ = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , (padded_height, padded_width) )
@slow
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ = FocalNetModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
snake_case__ = model_class(config=__UpperCamelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''' ).to(__UpperCamelCase )
snake_case__ = self.default_image_processor
snake_case__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
snake_case__ = image_processor(images=__UpperCamelCase , return_tensors='''pt''' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
snake_case__ = model(**__UpperCamelCase )
# verify the logits
snake_case__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
snake_case__ = torch.tensor([0.2166, -0.4368, 0.2191] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 2_81 )
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin ,unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False
    def setUp( self ):
        self.model_tester = FocalNetModelTester(self )
| 719 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
        """https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = 'trajectory_transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self , vocab_size=1_00 , action_weight=5 , reward_weight=1 , value_weight=1 , block_size=2_49 , action_dim=6 , observation_dim=17 , transition_dim=25 , n_layer=4 , n_head=4 , n_embd=1_28 , embd_pdrop=0.1 , attn_pdrop=0.1 , resid_pdrop=0.1 , learning_rate=0.0006 , max_position_embeddings=5_12 , initializer_range=0.02 , layer_norm_eps=1e-12 , kaiming_initializer_range=1 , use_cache=True , pad_token_id=1 , bos_token_id=5_02_56 , eos_token_id=5_02_56 , **kwargs , ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
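# Hedged usage sketch: instantiating the config and reading a mapped attribute:
#   config = TrajectoryTransformerConfig(n_layer=6)
#   config.num_hidden_layers  # -> 6, resolved through attribute_map above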
| 208 | 0 |
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast( BertTokenizerFast ):
    '''simple docstring'''
    slow_tokenizer_class = CustomTokenizer
    pass
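# Hedged note: with `slow_tokenizer_class` set above, a fast tokenizer can be
# converted from a saved slow one (the path is illustrative):
#   CustomTokenizerFast.from_pretrained("path/to/slow_tokenizer", from_slow=True)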
| 502 |
"""simple docstring"""
def solution( n = 200_0000 ):
    """Return the sum of all primes below n (Project Euler problem 10)."""
    primality_list = [0 for i in range(n + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 , int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(F'{solution() = }')
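# Hand-checked example: solution(10) -> 17, i.e. 2 + 3 + 5 + 7.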
| 129 | 0 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""",
    """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""",
}
class XLNetConfig( PretrainedConfig ):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size", # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self ,vocab_size=3_2000 ,d_model=1024 ,n_layer=24 ,n_head=16 ,d_inner=4096 ,ff_activation="gelu" ,untie_r=True ,attn_type="bi" ,initializer_range=0.0_2 ,layer_norm_eps=1E-12 ,dropout=0.1 ,mem_len=512 ,reuse_len=None ,use_mems_eval=True ,use_mems_train=False ,bi_data=False ,clamp_len=-1 ,same_length=False ,summary_type="last" ,summary_use_proj=True ,summary_activation="tanh" ,summary_last_dropout=0.1 ,start_n_top=5 ,end_n_top=5 ,pad_token_id=5 ,bos_token_id=1 ,eos_token_id=2 ,**kwargs ,):
        '''Configuration class for XLNet models.'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(F"""'d_model % n_head' ({d_model % n_head}) should be equal to 0""" )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    F"""`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})""" )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
                ' instead.' ,FutureWarning ,)
            use_mems_eval = kwargs['use_cache']
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
    @property
    def max_position_embeddings( self ):
        '''XLNet has no sequence length limit.'''
        logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings( self ,value ):
        '''XLNet has no sequence length limit, so the setter is unsupported.'''
        raise NotImplementedError(
            F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 16 |
"""simple docstring"""
import base64
def base85_encode( string ):
    '''Encode a UTF-8 string to Ascii85 bytes.'''
    return base64.a85encode(string.encode('utf-8' ) )
def base85_decode( a85encoded ):
    '''Decode Ascii85 bytes back to a UTF-8 string.'''
    return base64.a85decode(a85encoded ).decode('utf-8' )
if __name__ == "__main__":
import doctest
doctest.testmod()
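# Hedged round-trip check:
#   base85_decode(base85_encode("Hello World!"))  # -> "Hello World!"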
| 16 | 1 |