Dataset schema (one row per example):

    code                     string  (82 to 53.2k chars)
    code_codestyle           int64   (0 to 721)
    style_context            string  (91 to 41.9k chars)
    style_context_codestyle  int64   (0 to 699)
    label                    int64   (0 or 1)
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
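# A quick sketch (illustrative, not part of the test) of how the two fixture
# files above fit together: the JSON vocab maps subword units to ids, and each
# merges line is "left right score" in subword-nmt style, applied greedily:
#
#   "lower" -> l o w e r</w> -> lo w e r</w> -> low e r</w> -> low er</w>
#
# which is why tokenize("lower") yields ["low", "er</w>"] -> ids [14, 15].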
--- code_codestyle: 27 ---
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
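# Note: the try/except above is the standard transformers lazy-import pattern.
# `modeling_mra` is only registered when torch can be imported, and at runtime
# the module object is swapped for a _LazyModule that resolves attributes on
# first access instead of importing everything eagerly.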
--- style_context_codestyle: 525 | label: 0 ---
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Return the maximum sum of non-adjacent elements of `nums`.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        # classic "house robber" recurrence: either take `num` on top of the
        # best sum that excluded the previous element, or skip it
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
--- code_codestyle: 707 ---
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer by walking the underscore-separated path
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
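# Hypothetical usage sketch (the model id and file names below are placeholders,
# not part of this script):
#
#   pipe = convert("runwayml/stable-diffusion-v1-5", "lora.safetensors",
#                  "lora_unet", "lora_te", alpha=0.75)
#   pipe.to("cuda").save_pretrained("merged-model")
#
# The core update is W <- W + alpha * (up @ down): each LoRA pair factorizes a
# weight delta as a low-rank product, so 4-D conv kernels are squeezed to 2-D
# for the matmul and unsqueezed back afterwards.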
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
--- style_context_codestyle: 155 | label: 0 ---
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        # assumed method name: the obfuscated source only shows an accessor
        # returning self._metrics, which `datasets.inspect` calls via the hub client
        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
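# The HfhMock above stands in for huggingface_hub so the metric listing works
# offline; pytest injects both mock fixtures by parameter name, which is why the
# test spells them out even though its body never references them directly.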
--- code_codestyle: 550 ---
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
    "The `inpainting.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionInpaintPipeline` instead."
)
--- style_context_codestyle: 620 | label: 0 ---
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight"))
    rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias"))

        if i < 3:
            rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))

    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on

    return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
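# Worked example (illustrative): patch merging concatenates four shifted
# sub-grids, and the two frameworks disagree on the order of the middle pair,
# i.e. [0, 1, 2, 3] versus [0, 2, 1, 3]. For a reduction weight the four
# column blocks are permuted as
#
#   [a | b | c | d]  ->  [a | c | b | d]
#
# and the reverse_* helpers apply the inverse permutation so checkpoints
# round-trip between the two layouts.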
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-swin-tiny",
        type=str,
        choices=[f"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]],
        help="Name of the Swin + UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)

--- code_codestyle: 686 ---
import pytest


DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = """
import json
import os

import datasets


REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}


class __DummyDataset1__(datasets.GeneratorBasedBuilder):

    def _info(self):
        features = datasets.Features(
            {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            "O",
                            "B-PER",
                            "I-PER",
                            "B-ORG",
                            "I-ORG",
                            "B-LOC",
                            "I-LOC",
                        ]
                    )
                ),
                "langs": datasets.Sequence(datasets.Value("string")),
                "spans": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(features=features)

    def _split_generators(self, dl_manager):
        dl_path = dl_manager.download(URLS)
        return [
            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                yield i, json.loads(line)
"""
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
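# Note: tmp_path is pytest's built-in per-test temporary directory, so the
# fixture above materializes the datasets/<script_name>/<script_name>.py layout
# that `datasets.load_dataset` expects for a local loading script.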
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )

        scheduler = DDPMScheduler()
        # the obfuscated source hides the first argument; the vqvae-free variant
        # of this pipeline is assumed here
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )

        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device

        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
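# The byte-for-byte image comparisons above only hold under
# enable_full_determinism() with a fixed torch.Generator seed; on different
# hardware or backends the rendered spectrograms may differ pixel-for-pixel.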
--- code_codestyle: 465 ---
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
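# Minimal usage sketch ("resnet50" is just an example timm model id, not part
# of this file):
#
#   config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
#
# out_indices defaults to (-1,), i.e. only the last feature map is exposed.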
--- style_context_codestyle: 250 | label: 0 ---
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/realm-cc-news-pretrained-embedder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-encoder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-scorer": (
        "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-openqa": (
        "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json"
    ),
    "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
    "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
    "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
    "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
    # See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
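# Minimal sketch: RealmConfig() with no arguments reproduces the defaults
# above; individual fields are overridden as keyword arguments, e.g.
#
#   config = RealmConfig(num_candidates=4, searcher_beam_size=1000)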
--- code_codestyle: 716 ---
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }


@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
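# Note: do_flip_channel_order only reverses the channel axis (RGB -> BGR), so
# it does not affect the output geometry -- which is why all three input
# variants (PIL, numpy, torch) assert only on the cropped shape.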
--- style_context_codestyle: 131 | label: 0 ---
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise constant schedule; `step_rules` looks like "1:10,0.1:20,0.01"."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to zero."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by cosine decay."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup followed by cosine decay with hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup followed by a polynomial decay from the optimizer's lr to lr_end."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified entry point that dispatches to the scheduler factory named by `name`."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
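# Hypothetical usage (any torch.optim.Optimizer works):
#
#   scheduler = get_scheduler(
#       "cosine", optimizer, num_warmup_steps=500, num_training_steps=10_000
#   )
#   for step in range(10_000):
#       ...  # forward/backward + optimizer.step()
#       scheduler.step()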
--- code_codestyle: 445 ---
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)


class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states


@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()

        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res


@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
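# PABEE in one line: at inference every internal classifier votes, and the
# forward pass exits early once `patience` consecutive layers agree on the
# same prediction -- the speed-up that log_stats() reports.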
--- style_context_codestyle: 244 | label: 0 ---
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : List[Any] = {'vocab_file': 'vocab.txt'}
_lowercase : Dict = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
_lowercase : Union[str, Any] = {
'YituTech/conv-bert-base': 5_12,
'YituTech/conv-bert-medium-small': 5_12,
'YituTech/conv-bert-small': 5_12,
}
_lowercase : str = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class a_ ( __lowercase ):
lowercase_ : Dict = VOCAB_FILES_NAMES
lowercase_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowercase_ : List[Any] = PRETRAINED_INIT_CONFIGURATION
lowercase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : Optional[int] = ConvBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 720 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"
    def __init__(self, num_channels=3, patch_size=16, stride=16, pool_size=3, mlp_ratio=4.0, depths=[2, 2, 6, 2], hidden_sizes=[64, 128, 320, 512], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], padding=[2, 1, 1, 1], num_encoder_blocks=4, drop_path_rate=0.0, hidden_act="gelu", use_layer_scale=True, layer_scale_init_value=1e-5, initializer_range=0.02, **kwargs):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)
class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
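# A minimal sketch of the two classes above (assumption: the defaults mirror
# the sail/poolformer_s12 checkpoint; values shown are illustrative only):
#
#     config = PoolFormerConfig(hidden_sizes=[64, 128, 320, 512], depths=[2, 2, 6, 2])
#     onnx_config = PoolFormerOnnxConfig(config)
#     list(onnx_config.inputs)          # ["pixel_values"]
#     onnx_config.atol_for_validation   # 0.002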
| 427 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 687 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    """Builds configs and dummy inputs for the FocalNet tests below."""
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, hidden_sizes=[32, 64, 128], depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2"], out_indices=[1, 2]):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, hidden_sizes=self.hidden_sizes, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])
        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)
        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim])
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim])
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)

@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
| 687 | 1 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    """Check whether num/den is a 'digit cancelling' fraction such as 49/98."""
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )

def fraction_list(digit_len: int) -> list[str]:
    """Collect all non-trivial digit cancelling fractions with `digit_len` digits."""
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions

def solution(digit_len: int = 2) -> int:
    """Return the denominator of the product of the curious fractions, in lowest terms."""
    result = 1.0
    for fraction in fraction_list(digit_len):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
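# Sanity sketch (assumption: Project Euler 33's four curious fractions are
# 16/64, 19/95, 26/65 and 49/98, whose product reduces to 1/100):
#
#     fraction_list(2)  # -> ['16/64', '19/95', '26/65', '49/98']
#     solution()        # -> 100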
| 43 |
"""String-case conversion helpers: Pascal, camel, snake and kebab case."""
import re

def split_input(str_: str) -> list:
    """Split on any character that is not alphanumeric or whitespace."""
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]

def to_simple_case(str_: str) -> str:
    """Join all words, capitalising each one (PascalCase)."""
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])

def to_complex_case(text: str, upper: bool, separator: str) -> str:
    """Join all words with `separator`, upper- or lower-casing them."""
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ])
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ])
        return res_str
    except IndexError:
        return "not valid string"

def to_pascal_case(text: str) -> str:
    return to_simple_case(text)

def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"

def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")

def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")

if __name__ == "__main__":
    __import__("doctest").testmod()
| 43 | 1 |
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray

def jacobi_iteration_method(coefficient_matrix: NDArray[float64], constant_matrix: NDArray[float64], init_val: list[float], iterations: int) -> list[float]:
    """Solve Ax = b iteratively, assuming A is strictly diagonally dominant."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val
    return [float(i) for i in new_val]

def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise if any diagonal entry does not dominate the rest of its row."""
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
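    # A small worked example (assumption: the 2x2 system 4x + y = 7, x + 3y = 10
    # is strictly diagonally dominant, so the iteration converges toward [1, 3]).
    coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
    constant = np.array([[7.0], [10.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.0, 0.0], iterations=25))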
| 639 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
lowercase_ = TypeVar("""T""")
class SegmentTree(Generic[T]):
    """A non-recursive segment tree parameterised by a combining function."""

    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None
        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Check every (i, j) range query against a reduce over the raw list."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
| 314 | 0 |
"""simple docstring"""
import os
def solution():
    """Find the maximum total from top to bottom of the triangle in triangle.txt."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")
    with open(triangle_path) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    # Dynamic programming: each entry accumulates the best path sum reaching it.
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
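# Expected triangle.txt format sketch (assumption: space-separated rows, one
# per line, as in the Project Euler download):
#
#     3
#     7 4
#     2 4 6
#     8 5 9 3
#
# For this small triangle the best top-to-bottom path sums to 3 + 7 + 4 + 9 = 23.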
if __name__ == "__main__":
print(solution())
| 67 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class DatasetScriptsTest(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        """Find instances where a non-binary file is opened without UTF-8 encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        """Find print statements, ignoring comments, strings and docstrings."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 67 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_jukebox""": [
"""JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""JukeboxConfig""",
"""JukeboxPriorConfig""",
"""JukeboxVQVAEConfig""",
],
"""tokenization_jukebox""": ["""JukeboxTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
"""JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""JukeboxModel""",
"""JukeboxPreTrainedModel""",
"""JukeboxVQVAE""",
"""JukeboxPrior""",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 62 |
'''simple docstring'''
from math import isqrt, log2

def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: all primes strictly below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]

def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Count hybrid integers p^q * q^p (p, q distinct primes) not exceeding base^degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(f"""{solution() = }""")
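# Quick sketch of the sieve helper (values are easy to verify by hand):
#
#     calculate_prime_numbers(30)
#     # -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]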
| 536 | 0 |
def heaps(arr: list) -> list:
    """Return all permutations of arr, generated with Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
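# A quick sketch of the output shape (assumption: the ordering below is what
# this in-place swap scheme produces for three elements, 3! = 6 tuples):
#
#     heaps([1, 2, 3])
#     # -> [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]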
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
| 191 |
import string
from math import log10

def term_frequency(term: str, document: str) -> int:
    """Count occurrences of `term` in `document`, ignoring case and punctuation."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])

def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation))  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))

def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    """log10(n / df); with smoothing, 1 + log10(n / (1 + df))."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)
def tf_idf(tf: int, idf: int) -> float:
    """Combine term frequency and inverse document frequency."""
    return round(tf * idf, 3)
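# A minimal end-to-end sketch (assumption: documents are newline-separated;
# the inline counts follow from the simple whitespace tokenisation above).
if __name__ == "__main__":
    corpus = "this is a sample\nthis sample is another example"
    tf = term_frequency("sample", "this is a sample")        # 1 occurrence
    df, n = document_frequency("sample", corpus)             # (2, 2)
    idf = inverse_document_frequency(df, n, smoothing=True)  # smoothed log10
    print(tf_idf(tf, idf))
| 191 | 1 |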
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # A dummy dataset of unknown length that stops at a random point.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]
        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)
        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False):
        random.seed(seed)
        reference = list(dataset)
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i, split_batches=split_batches, )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))
        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)
        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
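# A standalone sketch of the sharding semantics exercised above (assumption:
# two processes; with even_batches=True the shards wrap around the dataset so
# both processes see the same number of batches):
#
#     sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
#     shard1 = BatchSamplerShard(sampler, 2, 1)   # num_processes=2, process_index=1
#     list(shard1)[-1]
#     # -> [0, 1, 2]: the final batch wraps to keep the two shards even
| 90 |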
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right

EN_CODE = 50003
PYTHON_CODE = 50002

@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])
        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False), code)
    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
lowercase : Tuple = tokenizer.convert_tokens_to_ids(snake_case )
self.assertListEqual(
snake_case ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] ,)
lowercase : Optional[int] = tokenizer.convert_ids_to_tokens(snake_case )
self.assertListEqual(
snake_case ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
lowercase : List[Any] = tokenizer.vocab_size
lowercase : int = [tokenizer.convert_ids_to_tokens(snake_case ) for x in range(end - 7 ,snake_case )]
self.assertListEqual(
snake_case ,["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] )
lowercase : int = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
lowercase : Optional[Any] = tokenizer(snake_case ).input_ids
self.assertEqual(
tokenizer.decode(snake_case ,skip_special_tokens=snake_case ,clean_up_tokenization_spaces=snake_case ) ,snake_case ,)
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
"def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
"def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
]
    tgt_text = [
"Returns the maximum value of a b c.",
"Sums the values of a b c.",
]
    expected_src_tokens = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def _SCREAMING_SNAKE_CASE ( cls ):
'''simple docstring'''
lowercase : PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name ,language_codes="""base""" ,src_lang="""python""" ,tgt_lang="""en_XX""" )
        cls.pad_token_id = 1
return cls
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] ,50001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] ,50002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] ,50003 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[int] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.assertIn(snake_case ,self.tokenizer.all_special_ids )
lowercase : List[str] = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
lowercase : int = self.tokenizer.decode(snake_case ,skip_special_tokens=snake_case )
lowercase : Tuple = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=snake_case )
self.assertEqual(snake_case ,snake_case )
self.assertNotIn(self.tokenizer.eos_token ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[str] = ["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20]
self.assertIsInstance(src_text[0] ,snake_case )
lowercase : Tuple = 10
lowercase : List[str] = self.tokenizer(snake_case ,max_length=snake_case ,truncation=snake_case ).input_ids[0]
self.assertEqual(ids[-2] ,2 )
self.assertEqual(ids[-1] ,snake_case )
self.assertEqual(len(snake_case ) ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) ,[50004, 50001] )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[str] = tempfile.mkdtemp()
lowercase : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(snake_case )
lowercase : Optional[Any] = PLBartTokenizer.from_pretrained(snake_case )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,snake_case )
@require_torch
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : int = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=snake_case ,return_tensors="""pt""" )
lowercase : List[str] = shift_tokens_right(batch["""labels"""] ,self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() ,[2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] ,snake_case )
self.assertEqual(batch.decoder_input_ids[1][-1] ,2 )
self.assertEqual(batch.labels[1][-2:].tolist() ,[2, EN_CODE] )
@require_torch
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Union[str, Any] = self.tokenizer(
self.src_text ,text_target=self.tgt_text ,padding=snake_case ,truncation=snake_case ,max_length=len(self.expected_src_tokens ) ,return_tensors="""pt""" ,)
lowercase : str = shift_tokens_right(batch["""labels"""] ,self.tokenizer.pad_token_id )
self.assertIsInstance(snake_case ,snake_case )
self.assertEqual((2, 26) ,batch.input_ids.shape )
self.assertEqual((2, 26) ,batch.attention_mask.shape )
lowercase : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens ,snake_case )
self.assertEqual(2 ,batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens ,[] )
self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id, PYTHON_CODE] )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[Any] = self.tokenizer(self.src_text ,padding=snake_case ,truncation=snake_case ,max_length=3 ,return_tensors="""pt""" )
lowercase : str = self.tokenizer(
text_target=self.tgt_text ,padding=snake_case ,truncation=snake_case ,max_length=10 ,return_tensors="""pt""" )
lowercase : int = targets["""input_ids"""]
lowercase : Tuple = shift_tokens_right(snake_case ,self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.decoder_input_ids.shape[1] ,10 )
@require_torch
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[int] = self.tokenizer._build_translation_inputs(
"""A test""" ,return_tensors="""pt""" ,src_lang="""en_XX""" ,tgt_lang="""java""" )
self.assertEqual(
nested_simplify(snake_case ) ,{
# A, test, EOS, en_XX
"""input_ids""": [[150, 242, 2, 50003]],
"""attention_mask""": [[1, 1, 1, 1]],
# java
"""forced_bos_token_id""": 50001,
} ,)
| 336 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"openmmlab/upernet-convnext-tiny",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    def __init__(self, config, in_channels):
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output
class UperNetFCNHead(nn.Module):
    def __init__(
        self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1
    ) -> None:
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
_lowerCamelCase : int = R"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowerCamelCase : int = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        """simple docstring"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
| 701 |
def perfect_cube(n: int) -> bool:
    '''simple docstring'''
    # round the float cube root before cubing; raw float roots are inexact
    val = round(n ** (1 / 3))
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(2_7))
    print(perfect_cube(4))
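# Numeric check (hand-computed, not from the original source): 27 ** (1 / 3)
# evaluates to 3.0000000000000004 in IEEE-754 doubles, so the unrounded version
# misclassified 27; rounding the root first makes perfect_cube(27) True and
# perfect_cube(4) False as expected.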
| 196 | 0 |
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
if __name__ == "__main__":
print(f"""{solution() = }""")
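# Rough sanity check of the recurrence above (hand-computed, not part of the
# original source): solution(4) == 8, which matches the tetranacci relation
# f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4) with f(0) = f(1) = 1 for rows that
# mix unit squares with tiles of length 2, 3 and 4.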
| 233 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : Dict = logging.get_logger(__name__)
A__ : Optional[Any] = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
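# Minimal usage sketch (the language tuple and task below are illustrative
# only; this assumes the standard `transformers` config/ONNX-config API):
#     config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
#     onnx_config = XmodOnnxConfig(config, task="sequence-classification")
#     print(onnx_config.inputs)  # OrderedDict of input_ids / attention_mask axes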
| 233 | 1 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__lowerCAmelCase =["text", "image", "audio"]
def __UpperCamelCase ( _lowerCAmelCase ):
"""simple docstring"""
UpperCAmelCase = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((5_12, 5_12) ) )
elif input_type == "audio":
inputs.append(torch.ones(30_00 ) )
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
inputs.append(create_inputs(_lowerCAmelCase ) )
else:
raise ValueError(F'''Invalid type requested: {input_type}''' )
return inputs
def __UpperCamelCase ( _lowerCAmelCase ):
"""simple docstring"""
UpperCAmelCase = []
for output in outputs:
if isinstance(_lowerCAmelCase , (str, AgentText) ):
output_types.append("text" )
elif isinstance(_lowerCAmelCase , (Image.Image, AgentImage) ):
output_types.append("image" )
elif isinstance(_lowerCAmelCase , (torch.Tensor, AgentAudio) ):
output_types.append("audio" )
else:
raise ValueError(F'''Invalid output: {output}''' )
return output_types
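# Quick illustration (not part of the original test suite):
# output_types(["some caption", torch.ones(30_00)]) returns ["text", "audio"].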
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
| 700 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the multi-process interleaved print problem by locking this file"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()

    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
| 405 | 0 |
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
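# e.g. add_newline_to_end_of_each_sentence("Hello there. General Kenobi.")
# returns "Hello there.\nGeneral Kenobi.", one sentence per line, which is the
# layout ROUGE-Lsum scoring expects.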
| 323 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
__A = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    """simple docstring"""

    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
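# Hypothetical usage sketch (field values are illustrative only; `models`,
# `batch_sizes` and `sequence_lengths` come from the parent BenchmarkArguments):
#     args = TensorFlowBenchmarkArguments(
#         models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
#     )
#     print(args.is_gpu, args.strategy)  # device flags resolved into a tf.distribute strategy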
| 68 | 0 |
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
print(f'''{solution() = }''')
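# Note: XOR is its own inverse, (c ^ k) ^ k == c, so try_key doubles as both
# encryption and decryption; a candidate key is discarded as soon as it yields
# a byte outside the printable VALID_INTS set, which prunes the 26**3 keyspace
# quickly before the COMMON_WORDS filter runs.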
| 537 |
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def snake_case ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
def snake_case ( self ):
A : Union[str, Any] = '''[PAD]'''
A : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def snake_case ( self ):
A : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''[PAD]''' )
self.assertEqual(vocab_keys[1] , '''[CLS]''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(_UpperCAmelCase ) , 1_012 )
def snake_case ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_012 )
def snake_case ( self ):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
A : int = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
A : Optional[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
A : int = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
A : Union[str, Any] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
def snake_case ( self ):
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
def snake_case ( self ):
A : Union[str, Any] = '''Hello World!'''
A : Any = [35_389, 6_672, 49, 2]
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@slow
def snake_case ( self ):
# fmt: off
A : List[Any] = {'''input_ids''': [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
| 537 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
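    # `_LazyModule` defers the heavy submodule imports declared above until an
    # attribute is first accessed, so importing the package stays cheap even
    # when the optional tokenizers / torch / TF backends are all installed.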
| 598 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
'''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoForCausalLM''',
'''GPTNeoForQuestionAnswering''',
'''GPTNeoForSequenceClassification''',
'''GPTNeoForTokenClassification''',
'''GPTNeoModel''',
'''GPTNeoPreTrainedModel''',
'''load_tf_weights_in_gpt_neo''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
'''FlaxGPTNeoForCausalLM''',
'''FlaxGPTNeoModel''',
'''FlaxGPTNeoPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 189 | 0 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
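# Worked example: height=500 with scale_factor=8 gives 500 // 64 = 7 with a
# nonzero remainder, so new_height becomes 8 and 8 * 8 = 64 is returned, i.e.
# the latent height for a 512-pixel image. Pixel sizes are effectively rounded
# up to the next multiple of 64 before the 8x spatial downsampling.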
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
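# The tensor returned above is NCHW float32 scaled to [-1, 1] (arr / 127.5 - 1),
# which is the value range the pipeline's movq.encode call consumes below.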
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """simple docstring"""

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
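    # e.g. num_inference_steps=100 with strength=0.3 gives init_timestep=30 and
    # t_start=70, so only the last 30 scheduler steps run on the noised input
    # image; a higher strength keeps less of the original image.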
def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A , _A=None ) -> List[Any]:
if not isinstance(_A , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_A )}''' )
SCREAMING_SNAKE_CASE_ = image.to(device=_A , dtype=_A )
SCREAMING_SNAKE_CASE_ = batch_size * num_images_per_prompt
if image.shape[1] == 4:
SCREAMING_SNAKE_CASE_ = image
else:
if isinstance(_A , _A ) and len(_A ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(_A )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
elif isinstance(_A , _A ):
SCREAMING_SNAKE_CASE_ = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_A )
]
SCREAMING_SNAKE_CASE_ = torch.cat(_A , dim=0 )
else:
SCREAMING_SNAKE_CASE_ = self.movq.encode(_A ).latent_dist.sample(_A )
SCREAMING_SNAKE_CASE_ = self.movq.config.scaling_factor * init_latents
SCREAMING_SNAKE_CASE_ = torch.cat([init_latents] , dim=0 )
SCREAMING_SNAKE_CASE_ = init_latents.shape
SCREAMING_SNAKE_CASE_ = randn_tensor(_A , generator=_A , device=_A , dtype=_A )
# get latents
SCREAMING_SNAKE_CASE_ = self.scheduler.add_noise(_A , _A , _A )
SCREAMING_SNAKE_CASE_ = init_latents
return latents
def _UpperCamelCase ( self , _A=0 ) -> Optional[Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
SCREAMING_SNAKE_CASE_ = torch.device(F'''cuda:{gpu_id}''' )
SCREAMING_SNAKE_CASE_ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_A , _A )
def _UpperCamelCase ( self , _A=0 ) -> List[Any]:
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
SCREAMING_SNAKE_CASE_ = torch.device(F'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=_A )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
SCREAMING_SNAKE_CASE_ = None
for cpu_offloaded_model in [self.unet, self.movq]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = cpu_offload_with_hook(_A , _A , prev_module_hook=_A )
# We'll offload the last model manually.
SCREAMING_SNAKE_CASE_ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _UpperCamelCase ( self ) -> Optional[Any]:
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_A , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__( self , _A , _A , _A , _A = 512 , _A = 512 , _A = 100 , _A = 4.0 , _A = 0.3 , _A = 1 , _A = None , _A = "pil" , _A = True , ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = self._execution_device
SCREAMING_SNAKE_CASE_ = guidance_scale > 1.0
if isinstance(_A , _A ):
SCREAMING_SNAKE_CASE_ = torch.cat(_A , dim=0 )
SCREAMING_SNAKE_CASE_ = image_embeds.shape[0]
if isinstance(_A , _A ):
SCREAMING_SNAKE_CASE_ = torch.cat(_A , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE_ = image_embeds.repeat_interleave(_A , dim=0 )
SCREAMING_SNAKE_CASE_ = negative_image_embeds.repeat_interleave(_A , dim=0 )
SCREAMING_SNAKE_CASE_ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_A )
if not isinstance(_A , _A ):
SCREAMING_SNAKE_CASE_ = [image]
if not all(isinstance(_A , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F'''Input is in incorrect format: {[type(_A ) for i in image]}. Currently, we only support PIL image and pytorch tensor''' )
SCREAMING_SNAKE_CASE_ = torch.cat([prepare_image(_A , _A , _A ) for i in image] , dim=0 )
SCREAMING_SNAKE_CASE_ = image.to(dtype=image_embeds.dtype , device=_A )
SCREAMING_SNAKE_CASE_ = self.movq.encode(_A )['''latents''']
SCREAMING_SNAKE_CASE_ = latents.repeat_interleave(_A , dim=0 )
self.scheduler.set_timesteps(_A , device=_A )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.get_timesteps(_A , _A , _A )
SCREAMING_SNAKE_CASE_ = timesteps[:1].repeat(batch_size * num_images_per_prompt )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = downscale_height_and_width(_A , _A , self.movq_scale_factor )
SCREAMING_SNAKE_CASE_ = self.prepare_latents(
_A , _A , _A , _A , image_embeds.dtype , _A , _A )
for i, t in enumerate(self.progress_bar(_A ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE_ = {'''image_embeds''': image_embeds}
SCREAMING_SNAKE_CASE_ = self.unet(
sample=_A , timestep=_A , encoder_hidden_states=_A , added_cond_kwargs=_A , return_dict=_A , )[0]
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = noise_pred.split(latents.shape[1] , dim=1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = variance_pred.chunk(2 )
SCREAMING_SNAKE_CASE_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
SCREAMING_SNAKE_CASE_ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE_ = self.scheduler.step(
_A , _A , _A , generator=_A , )[0]
# post-processing
SCREAMING_SNAKE_CASE_ = self.movq.decode(_A , force_not_quantize=_A )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
SCREAMING_SNAKE_CASE_ = image * 0.5 + 0.5
SCREAMING_SNAKE_CASE_ = image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(_A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_A )
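

# --- usage sketch (added for illustration; not part of the original pipeline file) ---
# This class appears to mirror diffusers' KandinskyV22Img2ImgPipeline, so the class and
# checkpoint ids below are the usual Kandinsky 2.2 repos and should be treated as assumptions:
#
#     from diffusers import KandinskyV22PriorPipeline
#
#     prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
#     image_embeds, negative_image_embeds = prior("a red cat, 4k photo").to_tuple()
#     out = pipe(image_embeds=image_embeds, image=init_image,
#                negative_image_embeds=negative_image_embeds, strength=0.3).images[0]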
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    """Arguments pertaining to which config/tokenizer/model we are going to fine-tune from."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )
@dataclasses.dataclass
class STDataArguments:
    """Arguments pertaining to the data used for training and evaluation."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "The name of the task to train on."}
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )
@dataclasses.dataclass
class STTrainingArguments:
    """Training arguments."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "steps", "epoch"]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Number of self-training iterations to run."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
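

# For intuition, the confidence filter above behaves like this tiny, self-contained
# `datasets` snippet (illustrative only; not part of the original script):
#
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({"prediction": [0, 1], "probability": [0.55, 0.97]})
#     ds = ds.filter(lambda ex: ex["probability"] > 0.9)   # keeps only the confident row
#     ds = ds.rename_column("prediction", "label")         # pseudo-labels become labels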
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Self-training a pre-trained model on a downstream task.

    Args:
      model_name_or_path: Path to pretrained model or model identifier from huggingface.co/models.
      train_file: A csv or a json file containing the training data.
      infer_file: A csv or a json file containing the data to predict on.
      output_dir: The output directory where the model predictions and checkpoints will be written.
      **kwargs: Dict of optional arguments overriding the defaults above.
    """
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
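

# Example invocation (illustrative; file paths and overrides are placeholders, and any
# keyword must correspond to a field defined on the ST*Arguments dataclasses above):
#
#     selftrain(
#         model_name_or_path="bert-base-uncased",
#         train_file="data/train.csv",
#         infer_file="data/infer.csv",
#         output_dir="outputs",
#         eval_file="data/eval.csv",
#         evaluation_strategy="steps",
#         max_selftrain_iterations=10,
#     )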
def abbr(a: str, b: str) -> bool:
    """
    Determine whether string `a` can be abbreviated to string `b` by capitalizing some
    of its lowercase letters and deleting all remaining lowercase letters.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
def bin_to_octal(bin_string: str) -> str:
    """
    Convert a binary value to its octal equivalent.

    >>> bin_to_octal("1111")
    '17'
    >>> bin_to_octal("101010101010011")
    '52523'
    """
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
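

# Quick sanity check outside the test harness (illustrative; downloads the
# "microsoft/deberta-base" tokenizer files on first use):
#
#     tok = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
#     ids = tok("lower newer")["input_ids"]
#     assert tok.decode(ids, skip_special_tokens=True) == "lower newer"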
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1)
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1)

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }

        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(init_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_h_img2img(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(init_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            init_image, "anime turtle", num_inference_steps=2, output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
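

# Outside the test suite, the pipeline can be driven directly (illustrative sketch;
# requires a CUDA device and downloads the fp16 checkpoint used above):
#
#     pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
#         "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
#     ).to("cuda")
#     image = pipe(init_image, "anime turtle", num_inference_steps=20).images[0]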
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True
def _get_default_logging_level():
    """
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices, return that as the new default level.
    If it is not - fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """
    Return a logger with the specified name. Not supposed to be directly accessed unless you are writing a custom
    transformers module.
    """
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current verbosity level of the library's root logger as an int."""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the verbosity level of the library's root logger."""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    """Set the verbosity to the `INFO` level."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set the verbosity to the `WARNING` level."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set the verbosity to the `DEBUG` level."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set the verbosity to the `ERROR` level."""
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    """Disable the default handler of the library's root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    """Enable the default handler of the library's root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    """Add a handler to the library's root logger."""
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    """Remove the given handler from the library's root logger."""
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    """Disable propagation of the library log outputs. Note that log propagation is disabled by default."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """
    Enable propagation of the library log outputs. Please disable the library's default handler to prevent double
    logging if the root logger has been configured.
    """
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    """Enable explicit formatting for every logger handler of the library."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    """Reset the formatting for the library's loggers."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    """
    Identical to `logger.warning()`, but if the env var TRANSFORMERS_NO_ADVISORY_WARNINGS=1 is set, the warning will
    not be printed.
    """
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Identical to `logger.warning()`, but emits a warning with the same message only once."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bars() -> None:
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
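

# Typical usage of this module from library code (illustrative):
#
#     from transformers.utils import logging
#
#     logging.set_verbosity_info()
#     logger = logging.get_logger(__name__)
#     logger.info("visible at INFO level")
#     logger.warning_advice("suppressed when TRANSFORMERS_NO_ADVISORY_WARNINGS is set")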
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(self, vocab_size=250112, d_model=512, d_kv=64, d_ff=1024, num_layers=8, num_decoder_layers=None, num_heads=6, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, feed_forward_proj="gated-gelu", is_encoder_decoder=True, use_cache=True, tokenizer_class="T5Tokenizer", tie_word_embeddings=True, pad_token_id=0, eos_token_id=1, decoder_start_token_id=0, **kwargs):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder, tokenizer_class=tokenizer_class, tie_word_embeddings=tie_word_embeddings, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
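

# Example (illustrative): the derived properties alias the T5-style names.
#
#     config = UMT5Config()
#     assert config.hidden_size == config.d_model == 512
#     assert config.num_attention_heads == 6
#     assert config.num_hidden_layers == 8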
"""simple docstring"""
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """
    Sorts sequence[start..end] (both inclusive) in place.
    start defaults to 0 if not given, end defaults to len(sequence) - 1 if not given.

    >>> seq = [1, 6, 2, 5, 3, 4, 4, 5]; slowsort(seq); seq
    [1, 2, 3, 4, 4, 5, 5, 6]
    >>> seq = []; slowsort(seq); seq
    []
    """
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
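

# Example (added for illustration): slowsort mutates the list in place, and its
# "multiply and surrender" recursion makes it intentionally slower than any practical sort.
#
#     data = [9, 3, 7, 1]
#     slowsort(data)
#     assert data == [1, 3, 7, 9]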
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizer(PreTrainedTokenizer):
    """
    Construct a CamemBERT tokenizer, based on SentencePiece. Adapted from RobertaTokenizer and XLNetTokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
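

# Example round trip (illustrative; downloads the "camembert-base" sentencepiece model):
#
#     tok = CamembertTokenizer.from_pretrained("camembert-base")
#     ids = tok.encode("J'aime le camembert !")
#     print(tok.decode(ids, skip_special_tokens=True))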
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim, depths=depths, num_heads=num_heads, window_size=window_size, out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.stages.{i}.downsample.reduction.weight""", f"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.weight""", f"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.bias""", f"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def snake_case__ ( SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
lowercase__ , lowercase__ : List[str] = x.shape
lowercase__ : List[str] = x.reshape(SCREAMING_SNAKE_CASE_ , 4 , in_channel // 4 )
lowercase__ : Any = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return x
def reverse_correct_unfold_reduction_order(x):
    """Inverse of correct_unfold_reduction_order: restore the original unfold ordering."""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x
def correct_unfold_norm_order(x):
    """Reorder a patch-merging norm weight/bias (1-D tensor) from unfold layout to the HF layout."""
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x
def reverse_correct_unfold_norm_order(x):
    """Inverse of correct_unfold_norm_order for 1-D norm parameters."""
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Download an mmsegmentation UperNet + Swin checkpoint, convert it to HF format, verify it on an image, and optionally save/push."""
    model_name_to_url = {
        'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth',
        'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth',
        'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth',
        'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth',
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu', file_name=model_name)[
        'state_dict'
    ]
    for name, param in state_dict.items():
        print(name, param.shape)
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace('bn', 'batch_norm')
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)
    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)
    model.load_state_dict(state_dict)
    # verify on image
    url = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors='pt').pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
    print(logits.shape)
    print('First values of logits:', logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]])
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]])
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]])
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]])
    print('Logits:', outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"""Saving processor to {pytorch_dump_folder_path}""")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"""Pushing model and processor for {model_name} to hub""")
        model.push_to_hub(f"""openmmlab/{model_name}""")
        processor.push_to_hub(f"""openmmlab/{model_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-swin-tiny''',
type=str,
choices=[F'''upernet-swin-{size}''' for size in ['''tiny''', '''small''', '''base''', '''large''']],
help='''Name of the Swin + UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
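    # Example invocation (illustrative only; the script filename is an assumption):
    #   python convert_upernet_swin_to_pytorch.py --model_name upernet-swin-tiny \
    #       --pytorch_dump_folder_path ./upernet-swin-tiny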
| 164 |
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """A fixed-capacity circular queue backed by a ring of reusable doubly linked nodes."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # close the ring: the last node links back to the first
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception('Empty Queue')

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception('Full Queue')


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
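

# A minimal usage sketch (not part of the original module; the capacity value is arbitrary):
#
#   queue = CircularQueueLinkedList(2)
#   queue.enqueue("a")
#   queue.enqueue("b")
#   assert queue.first() == "a"
#   assert queue.dequeue() == "a"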
if __name__ == "__main__":
import doctest
doctest.testmod()
| 164 | 1 |
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    """Return the photographic negative of an image: each channel value v becomes 255 - v."""
    pixel_h, pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
if __name__ == "__main__":
# read original image
    img = imread("image_data/lena.jpg", 1)
# convert to its negative
    img = convert_to_negative(img)
# show result image
imshow("negative of original image", img)
waitKey(0)
destroyAllWindows()
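    # Note: with NumPy the same inversion can be done without the explicit loops,
    # e.g. (a sketch, assuming `img` is a uint8 BGR array): img = 255 - img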
| 3 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
_DESCRIPTION = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
_KWARGS_DESCRIPTION = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy(preds, labels):
    """Fraction of predictions that exactly match the labels."""
    return float((preds == labels).mean())
def acc_and_f1(preds, labels):
    """Accuracy plus binary F1, as used by the MRPC and QQP tasks."""
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }
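# Quick sanity check (a sketch, not from the original file; inputs must be numpy arrays):
#   acc_and_f1(np.array([1, 0, 1]), np.array([1, 1, 1])) -> {'accuracy': 0.666..., 'f1': 0.8}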
def pearson_and_spearman(preds, labels):
    """Pearson and Spearman correlations, as used by the STS-B task."""
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
    def _info(self):
        """Describe the metric's inputs and outputs for the datasets library."""
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 3 | 1 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers, mirroring fairseq's Dictionary."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Returns the number of symbols in the dictionary"""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Loads a pre-existing dictionary from a text file of '<symbol> <count>' lines."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary, or bumps its count if it already exists."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Loads a dictionary from ``f``, which may be a path or an open file object."""
        if isinstance(f, str):
            try:
                with open(f, 'r', encoding='utf-8') as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)
        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(' ', 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(' ', 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        'Duplicate word found when loading Dictionary: \'{}\'. '
                        'Duplicate words can overwrite earlier ones by adding the '
                        '#fairseq:overwrite flag at the end of the corresponding row '
                        'in the dictionary file. If using the Camembert model, please '
                        'download an updated copy of the model file.'.format(word))
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'')
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r'@@$', '', k), v) if k.endswith('@@') else (re.sub(r'$', '</w>', k), v) for k, v in d.items())
    keep_keys = '<s> <pad> </s> <unk>'.split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"""{k}</w>"""]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"""path {biogpt_checkpoint_path} does not exist!""")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"""Writing results to {pytorch_dump_folder_path}""")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, 'checkpoint.pt')
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"""path to the file {checkpoint_file} does not exist!""")
    chkpt = torch.load(checkpoint_file, map_location='cpu')

    args = chkpt['cfg']['model']

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, 'dict.txt')
    if not os.path.isfile(dict_file):
        raise ValueError(f"""path to the file {dict_file} does not exist!""")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['vocab_file'])
    print(f"""Generating {src_vocab_file} of {src_vocab_size} records""")
    with open(src_vocab_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, 'bpecodes')
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"""path to the file {bpecodes_file} does not exist!""")
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['merges_file'])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, 'config.json')
    model_conf = {
        'activation_dropout': args['activation_dropout'],
        'architectures': ['BioGptForCausalLM'],
        'attention_probs_dropout_prob': args['attention_dropout'],
        'bos_token_id': 0,
        'eos_token_id': 2,
        'hidden_act': args['activation_fn'],
        'hidden_dropout_prob': args['dropout'],
        'hidden_size': args['decoder_embed_dim'],
        'initializer_range': 0.02,
        'intermediate_size': args['decoder_ffn_embed_dim'],
        'layer_norm_eps': 1e-12,
        'layerdrop': args['decoder_layerdrop'],
        'max_position_embeddings': args['max_target_positions'],
        'model_type': 'biogpt',
        'num_attention_heads': args['decoder_attention_heads'],
        'num_hidden_layers': args['decoder_layers'],
        'pad_token_id': 1,
        'scale_embedding': not args['no_scale_embedding'],
        'tie_word_embeddings': args['share_decoder_input_output_embed'],
        'vocab_size': src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"""Generating {biogpt_model_config_file}""")
    with open(biogpt_model_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        'bos_token': '<s>',
        'eos_token': '</s>',
        'model_max_length': 1024,
        'pad_token': '<pad>',
        'special_tokens_map_file': None,
        'tokenizer_class': 'BioGptTokenizer',
        'unk_token': '<unk>',
    }
    print(f"""Generating {biogpt_tokenizer_config_file}""")
    with open(biogpt_tokenizer_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt['model']

    # remove unneeded keys
    ignore_keys = [
        'decoder.version',
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith('output_projection.weight'):
            model_state_dict['output_projection.weight'] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace('decoder', 'biogpt')] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"""Generating {pytorch_weights_dump_path}""")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print('Conversion is done!')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
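    # Example invocation (paths are placeholders; the script filename is an assumption):
    #   python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
    #       --biogpt_checkpoint_path /path/to/biogpt_checkpoint_dir \
    #       --pytorch_dump_folder_path ./biogpt-converted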
| 183 |
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """A graph vertex with an id, a Prim key, a parent pointer, and weighted edges."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight
def connect(graph, a, b, edge):
    """Connect vertices a and b (1-indexed) with an undirected edge of the given weight."""
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)
def prim(graph, root):
    """Prim's MST algorithm using a linear-scan minimum; returns the list of tree edges."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
def prim_heap(graph, root):
    """Prim's MST algorithm using a binary heap; yields the tree edges one by one."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def test_vector() -> None:
    """Placeholder for doctest examples exercising prim and prim_heap."""
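

# A minimal usage sketch (not part of the original module; weights are made up):
#
#   graph = [Vertex(i) for i in range(3)]
#   connect(graph, 1, 2, 5)
#   connect(graph, 2, 3, 1)
#   print(prim(graph, graph[0]))  # -> [(2, 1), (3, 2)]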
if __name__ == "__main__":
import doctest
doctest.testmod()
| 183 | 1 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices, )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
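# We will verify our results on an image of cute cats (the standard COCO test fixture)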
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 647 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar('''T''')
class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data  # the value stored at this node
        self.next: Node[T] | None = None  # link to the node below in the stack

    def __str__(self) -> str:
        return F"{self.data}"


class LinkedStack(Generic[T]):
    """LIFO stack implemented on top of a singly linked list."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
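

# A minimal usage sketch (not part of the original module; values are arbitrary):
#
#   stack: LinkedStack[int] = LinkedStack()
#   stack.push(1)
#   stack.push(2)
#   assert stack.peek() == 2
#   assert stack.pop() == 2
#   assert len(stack) == 1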
if __name__ == "__main__":
from doctest import testmod
testmod()
| 647 | 1 |
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_CITATION = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
        return {"f1": float(score) if score.size == 1 else score}
| 287 |
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
    def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared)
        return {"mse": mse}
| 287 | 1 |
"""simple docstring"""
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(R'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'DecisionTransformerConfig',
'EncoderDecoderConfig',
'MusicgenConfig',
'RagConfig',
'SpeechEncoderDecoderConfig',
'TimmBackboneConfig',
'VisionEncoderDecoderConfig',
'VisionTextDualEncoderConfig',
'LlamaConfig',
}
def get_checkpoint_from_config_class(config_class):
    """Return the checkpoint name mentioned in `config_class`'s docstring, or None if no valid one is found."""
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = F'''https://huggingface.co/{ckpt_name}'''
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''')
if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
| 720 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary of string Values."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}
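

# A minimal usage sketch (not part of the original module; language codes are illustrative):
#
#   feature = Translation(languages=["en", "fr"])
#   print(feature())  # -> struct<en: string, fr: string>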
@dataclass
class snake_case :
a_ : Optional[List] = None
a_ : Optional[int] = None
a_ : Optional[str] = None
# Automatically constructed
a_ : ClassVar[str] = "dict"
a_ : ClassVar[Any] = None
a_ : str = field(default="""TranslationVariableLanguages""" , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ = sorted(set(self.languages)) if self.languages else None
a_ = len(self.languages) if self.languages else None
def __call__( self) ->Any:
return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->int:
a_ = set(self.languages)
if self.languages and set(__UpperCAmelCase) - lang_set:
raise ValueError(
F'''Some languages in example ({", ".join(sorted(set(__UpperCAmelCase) - lang_set))}) are not in valid set ({", ".join(__UpperCAmelCase)}).''')
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
a_ = []
for lang, text in translation_dict.items():
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
translation_tuples.append((lang, text))
else:
translation_tuples.extend([(lang, el) for el in text])
# Ensure translations are in ascending order by language code.
a_ , a_ = zip(*sorted(__UpperCAmelCase))
return {"language": languages, "translation": translations}
def UpperCAmelCase__ ( self) ->Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value("string")),
"translation": Sequence(Value("string")),
} | 210 | 0 |
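# A short sketch of the encoding behaviour implemented in `encode_example` above: multiple
# translations for one language are split into parallel lists sorted by (language, text).
def _demo_variable_language_encoding() -> None:
    example = {"en": "the cat", "fr": ["le chat", "la chatte"]}
    tuples = []
    for lang, text in example.items():
        tuples.extend([(lang, t) for t in ([text] if isinstance(text, str) else text)])
    languages, translations = zip(*sorted(tuples))
    print(languages)     # ('en', 'fr', 'fr')
    print(translations)  # ('the cat', 'la chatte', 'le chat')
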
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
    "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
    "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
    "funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
    "funnel-transformer/intermediate": (
        "https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
    ),
    "funnel-transformer/intermediate-base": (
        "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
    ),
    "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
    "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
    "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
    "funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}


class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
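# A quick usage sketch of the derived properties above (illustrative; not executed on import):
def _demo_funnel_config() -> None:
    config = FunnelConfig()          # default block_sizes=[4, 4, 4]
    print(config.num_blocks)         # 3
    print(config.num_hidden_layers)  # 12, the sum of block_sizes
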
"""Client that receives a file from a simple TCP socket server."""
import socket


def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
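# A matching server-side sketch (an assumption for illustration; the server script is not
# part of this file): it must listen on the same port and stream the requested file back.
def serve_file(filename: str, port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _addr = server.accept()
    print(conn.recv(1024))  # b"Hello server!"
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()
    server.close()
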
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
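# A minimal sketch of the sampling pattern the tests above exercise. The random tensor is a
# stand-in for a trained denoising model's output; like the tests, this needs `torchsde`.
def _demo_sde_sampling_loop() -> None:
    scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, beta_schedule="linear")
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        model_output = torch.randn_like(model_input)  # stand-in for a real model
        sample = scheduler.step(model_output, t, sample).prev_sample
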
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of an edge to the child node
        self.nodes: dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the node prefix and a word.

        Returns (common substring, remaining node prefix, remaining word).
        """
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1

        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                    return True

    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")

        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
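# A small walkthrough of the node-splitting logic in `insert` (illustrative; "banana" and
# "band" share the prefix "ban", which becomes an intermediate non-leaf node):
def demo_prefix_split() -> None:
    root = RadixNode()
    root.insert_many(["banana", "band"])
    root.print_tree()
    # Expected shape:
    # - ban
    # -- ana  (leaf)
    # -- d  (leaf)
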
import tempfile
import unittest

from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
    torch_device,
)

from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model


class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def create_and_check_model(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fp16_forward(self, config, input_dict):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())


@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass


@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
[ 3_8_5_3_0, 2_1_0_7_0_3, 2_5_6_2_9_9, 1_4_1_0, 2_5_6_2_9_8, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_2_6, 3_2_1, 6_7_1, 2_5_9_2_2, 2_5_6_2_9_9, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1_4_6_0, 3_3_9, 3_1_2, 1_9_0_1_4, 1_0_6_2_0, 7_5_8, 2_5_6_2_9_9, 2_3_5_5,2_7_4, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_1_7, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 3_0_1, 2_5_6_2_9_8, 2_7_5, 1_1_9_9_8_3,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_2_0, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 2_2_3_4, 2_8_9, 2_2_7_5, 3_3_3,6_1_3_9_1, 2_8_9, 2_5_6_2_9_8, 5_4_3, 2_5_6_2_9_7, 1_6_8_7_1_4, 3_2_9, 2_5_6_2_9_6,2_7_4, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
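# A short sketch of the head-mask convention exercised in `test_generate_with_head_masking`
# above: a (num_layers, num_heads) tensor of zeros silences every attention head, so the
# summed attention weights checked in that test must be exactly 0.0. Sizes are illustrative
# and assume torch is available.
def _demo_head_mask_convention() -> None:
    num_layers, num_heads = 5, 4
    head_mask = torch.zeros(num_layers, num_heads)         # 0.0 disables a head
    decoder_head_mask = torch.ones(num_layers, num_heads)  # 1.0 keeps a head active
    print(head_mask.sum().item(), decoder_head_mask.sum().item())  # 0.0 20.0
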
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        AutoTokenizer,
        TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
        TFBartForConditionalGeneration,
        TFLogitsProcessorList,
        TFMinLengthLogitsProcessor,
        tf_top_k_top_p_filtering,
    )
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether top-k / top-p filtering masks exactly the expected logits
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
            [
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
            ],  # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
            ],  # cumulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    # setting framework_dependent_parameters needs to be gated, just like its contents' imports
    if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }
    @slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
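# A tiny self-contained illustration of the top-k half of `tf_top_k_top_p_filtering`: logits
# outside the k best are pushed to -inf so softmax sampling can never select them.
def _demo_top_k_mask() -> None:
    logits = tf.constant([[1.0, 2.0, 3.0, 4.0]])
    kth_best = tf.math.top_k(logits, k=2).values[..., -1, None]
    filtered = tf.where(logits < kth_best, tf.fill(tf.shape(logits), -float("inf")), logits)
    print(filtered.numpy())  # [[-inf -inf 3. 4.]]
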
import inspect
import unittest

import torch
import torch.nn as nn

from accelerate.hooks import (
    AlignDevicesHook,
    ModelHook,
    SequentialHook,
    add_hook_to_module,
    attach_align_device_hook,
    remove_hook_from_module,
    remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1


class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)

        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)

    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))

    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
| 450 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin ):
    """Wraps a ViT image processor and a CLIP tokenizer into a single CLIPSeg processor."""
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """ViTImageProcessor"""
    tokenizer_class = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , visual_prompt=None , images=None , return_tensors=None , **kwargs ):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("""You have to specify either text, visual prompt or images.""" )
        if text is not None and visual_prompt is not None:
            raise ValueError("""You have to specify exactly one type of prompt. Either text or visual prompt.""" )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if visual_prompt is not None and images is not None:
            encoding = {
                """pixel_values""": image_features.pixel_values,
                """conditional_pixel_values""": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                """conditional_pixel_values""": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , FutureWarning , )
        return self.image_processor
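# A minimal usage sketch (illustrative, not part of this module; assumes the public
# "CIDAS/clipseg-rd64-refined" checkpoint and a PIL image bound to `image` are available):
#
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   inputs = processor(text=["a cat"], images=image, return_tensors="pt")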
| 450 | 1 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/mask2former-swin-small-coco-instance""": (
        """https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig ):
    model_type = """mask2former"""
    backbones_supported = ["""swin"""]
    attribute_map = {"""hidden_size""": """hidden_dim"""}
    def __init__( self , backbone_config=None , feature_size=256 , mask_feature_size=256 , hidden_dim=256 , encoder_feedforward_dim=1_024 , activation_function="relu" , encoder_layers=6 , decoder_layers=10 , num_attention_heads=8 , dropout=0.0 , dim_feedforward=2_048 , pre_norm=False , enforce_input_projection=False , common_stride=4 , ignore_value=255 , num_queries=100 , no_object_weight=0.1 , class_weight=2.0 , mask_weight=5.0 , dice_weight=5.0 , train_num_points=12_544 , oversample_ratio=3.0 , importance_sample_ratio=0.75 , init_std=0.02 , init_xavier_std=1.0 , use_auxiliary_loss=True , feature_strides=[4, 8, 16, 32] , output_auxiliary_logits=None , **kwargs , ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone." )
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=False , out_features=["stage1", "stage2", "stage3", "stage4"] , )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop("model_type" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                F"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                F"Supported model types: {','.join(self.backbones_supported )}" )
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs )
    @classmethod
    def from_backbone_config( cls , backbone_config , **kwargs ):
        return cls(
            backbone_config=backbone_config , **kwargs , )
    def to_dict( self ) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
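# A minimal usage sketch (values are illustrative): `Mask2FormerConfig(num_queries=100)` builds a
# config with the default Swin backbone, while `Mask2FormerConfig.from_backbone_config(swin_config)`
# reuses an existing backbone configuration instead.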
| 628 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_rembert""": ["""REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RemBertConfig""", """RemBertOnnxConfig"""]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE :str = ["""RemBertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE :Optional[Any] = ["""RemBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_rembert"""] = [
"""REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RemBertForCausalLM""",
"""RemBertForMaskedLM""",
"""RemBertForMultipleChoice""",
"""RemBertForQuestionAnswering""",
"""RemBertForSequenceClassification""",
"""RemBertForTokenClassification""",
"""RemBertLayer""",
"""RemBertModel""",
"""RemBertPreTrainedModel""",
"""load_tf_weights_in_rembert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_rembert"""] = [
"""TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRemBertForCausalLM""",
"""TFRemBertForMaskedLM""",
"""TFRemBertForMultipleChoice""",
"""TFRemBertForQuestionAnswering""",
"""TFRemBertForSequenceClassification""",
"""TFRemBertForTokenClassification""",
"""TFRemBertLayer""",
"""TFRemBertModel""",
"""TFRemBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
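# With the `_LazyModule` indirection above, importing a symbol such as `RemBertModel` resolves
# the heavyweight torch/TF submodules only on first attribute access instead of at import time.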
| 628 | 1 |
import os
def solution() -> int:
    """Returns the total of all the name scores in the file p022_names.txt."""
    with open(os.path.dirname(__file__ ) + '/p022_names.txt' ) as file:
        names = str(file.readlines()[0] )
        names = names.replace('"' , '' ).split(',' )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
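# Example from the problem statement: "COLIN" has alphabetical value 3 + 15 + 12 + 9 + 14 = 53;
# as the 938th name in the sorted list it contributes a score of 938 * 53 = 49714.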
if __name__ == "__main__":
print(solution())
| 541 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class WavaVecaProcessorWithLMTest( unittest.TestCase ):
    def setUp( self ):
        vocab = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.add_kwargs_tokens_map = {
            'unk_token': '<unk>',
            'bos_token': '<s>',
            'eos_token': '</s>',
        }
        feature_extractor_map = {
            'feature_size': 1,
            'padding_value': 0.0,
            'sampling_rate': 16000,
            'return_attention_mask': False,
            'do_normalize': True,
        }
        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.feature_extraction_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.feature_extraction_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(feature_extractor_map ) + '\n' )
        # load decoder from hub
        self.decoder_name = 'hf-internal-testing/ngram-beam-search-decoder'
    def get_tokenizer( self , **kwargs_init ):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_feature_extractor( self , **kwargs ):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **kwargs )
    def get_decoder( self , **kwargs ):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **kwargs )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
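    # The fixture above writes a minimal CTC vocabulary and feature-extractor config into a
    # temporary directory so the helpers can instantiate real tokenizer/feature-extractor
    # objects from disk; only the beam-search decoder is fetched from the hub.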
    def test_save_load_pretrained_default( self ):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
        processor.save_pretrained(self.tmpdirname )
        processor = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , WavaVecaCTCTokenizer )
        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , WavaVecaFeatureExtractor )
        # decoder
        self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
        self.assertIsInstance(processor.decoder , BeamSearchDecoderCTC )
    def test_save_load_pretrained_additional_features( self ):
        processor = WavaVecaProcessorWithLM(
            tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
        processor.save_pretrained(self.tmpdirname )
        # overriding the decoder parameters at load time must be reflected on the processor
        processor = WavaVecaProcessorWithLM.from_pretrained(
            self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
        # decoder
        self.assertEqual(processor.language_model.alpha , 5.0 )
        self.assertEqual(processor.language_model.beta , 3.0 )
        self.assertEqual(processor.language_model.score_boundary , -7.0 )
        self.assertEqual(processor.language_model.unk_score_offset , 3 )
    def test_load_decoder_tokenizer_mismatch_content( self ):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(['xx'] )
        with self.assertRaisesRegex(ValueError , 'include' ):
            WavaVecaProcessorWithLM(
                tokenizer=tokenizer , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
    def test_feature_extractor( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
        raw_speech = floats_list((3, 1000) )
        input_feat_extract = feature_extractor(raw_speech , return_tensors='np' )
        input_processor = processor(raw_speech , return_tensors='np' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
        input_string = 'This is a test string'
        encoded_processor = processor(text=input_string )
        encoded_tok = tokenizer(input_string )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits( self , shape=(2, 10, 16) , seed=77 ):
        np.random.seed(seed )
        return np.random.rand(*shape )
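    # `_get_dummy_logits` returns deterministic pseudo-random logits of shape
    # (batch, sequence, vocab) so the beam-search decoding tests below are reproducible.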
    def test_decoder( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
        logits = self._get_dummy_logits(shape=(10, 16) , seed=13 )
        decoded_processor = processor.decode(logits )
        decoded_decoder = decoder.decode_beams(logits )[0]
        self.assertEqual(decoded_decoder[0] , decoded_processor.text )
        self.assertEqual('</s> <s> </s>' , decoded_processor.text )
        self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
        self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['fork'], ['spawn']] )
    def test_decoder_batch( self , pool_context ):
_snake_case = self.get_feature_extractor()
_snake_case = self.get_tokenizer()
_snake_case = self.get_decoder()
_snake_case = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
_snake_case = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_snake_case = processor.batch_decode(lowerCAmelCase_ )
else:
with get_context(lowerCAmelCase_ ).Pool() as pool:
_snake_case = processor.batch_decode(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = list(lowerCAmelCase_ )
with get_context('fork' ).Pool() as p:
_snake_case = decoder.decode_beams_batch(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case , _snake_case , _snake_case = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(lowerCAmelCase_ , decoded_processor.text )
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text )
self.assertListEqual(lowerCAmelCase_ , decoded_processor.logit_score )
self.assertListEqual(lowerCAmelCase_ , decoded_processor.lm_score )
    def test_decoder_with_params( self ):
_snake_case = self.get_feature_extractor()
_snake_case = self.get_tokenizer()
_snake_case = self.get_decoder()
_snake_case = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
_snake_case = self._get_dummy_logits()
_snake_case = 15
_snake_case = -20.0
_snake_case = -4.0
_snake_case = processor.batch_decode(
lowerCAmelCase_ , beam_width=lowerCAmelCase_ , beam_prune_logp=lowerCAmelCase_ , token_min_logp=lowerCAmelCase_ , )
_snake_case = decoded_processor_out.text
_snake_case = list(lowerCAmelCase_ )
with get_context('fork' ).Pool() as pool:
_snake_case = decoder.decode_beams_batch(
lowerCAmelCase_ , lowerCAmelCase_ , beam_width=lowerCAmelCase_ , beam_prune_logp=lowerCAmelCase_ , token_min_logp=lowerCAmelCase_ , )
_snake_case = [d[0][0] for d in decoded_decoder_out]
_snake_case = [d[0][2] for d in decoded_decoder_out]
_snake_case = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , lowerCAmelCase_ )
self.assertTrue(np.array_equal(lowerCAmelCase_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.0_54, -18.4_47] , lowerCAmelCase_ , atol=1E-3 ) )
self.assertTrue(np.array_equal(lowerCAmelCase_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.5_54, -13.94_74] , lowerCAmelCase_ , atol=1E-3 ) )
    def test_decoder_with_params_of_lm( self ):
_snake_case = self.get_feature_extractor()
_snake_case = self.get_tokenizer()
_snake_case = self.get_decoder()
_snake_case = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
_snake_case = self._get_dummy_logits()
_snake_case = 2.0
_snake_case = 5.0
_snake_case = -20.0
_snake_case = True
_snake_case = processor.batch_decode(
lowerCAmelCase_ , alpha=lowerCAmelCase_ , beta=lowerCAmelCase_ , unk_score_offset=lowerCAmelCase_ , lm_score_boundary=lowerCAmelCase_ , )
_snake_case = decoded_processor_out.text
_snake_case = list(lowerCAmelCase_ )
decoder.reset_params(
alpha=lowerCAmelCase_ , beta=lowerCAmelCase_ , unk_score_offset=lowerCAmelCase_ , lm_score_boundary=lowerCAmelCase_ , )
with get_context('fork' ).Pool() as pool:
_snake_case = decoder.decode_beams_batch(
lowerCAmelCase_ , lowerCAmelCase_ , )
_snake_case = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , lowerCAmelCase_ )
_snake_case = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , lowerCAmelCase_ )
    def test_decoder_download_ignores_files( self ):
_snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
_snake_case = processor.decoder.model_container[processor.decoder._model_key]
_snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
_snake_case = os.listdir(lowerCAmelCase_ )
_snake_case = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
    def test_decoder_local_files( self ):
_snake_case = snapshot_download('hf-internal-testing/processor_with_lm' )
_snake_case = WavaVecaProcessorWithLM.from_pretrained(lowerCAmelCase_ )
_snake_case = processor.decoder.model_container[processor.decoder._model_key]
_snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
_snake_case = os.listdir(lowerCAmelCase_ )
_snake_case = os.listdir(lowerCAmelCase_ )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that both the decoder from the hub and the local files in cache are the same
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
    def test_processor_from_auto_processor( self ):
_snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
_snake_case = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
_snake_case = floats_list((3, 1000) )
_snake_case = processor_wavaveca(lowerCAmelCase_ , return_tensors='np' )
_snake_case = processor_auto(lowerCAmelCase_ , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
_snake_case = self._get_dummy_logits()
_snake_case = processor_wavaveca.batch_decode(lowerCAmelCase_ )
_snake_case = processor_auto.batch_decode(lowerCAmelCase_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
    def test_model_input_names( self ):
_snake_case = self.get_feature_extractor()
_snake_case = self.get_tokenizer()
_snake_case = self.get_decoder()
_snake_case = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
    @staticmethod
    def get_from_offsets( offsets , key ):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def test_offsets_integration_fast( self ):
_snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
_snake_case = self._get_dummy_logits()[0]
_snake_case = processor.decode(lowerCAmelCase_ , output_word_offsets=lowerCAmelCase_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
    def test_offsets_integration_fast_batch( self ):
_snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
_snake_case = self._get_dummy_logits()
_snake_case = processor.batch_decode(lowerCAmelCase_ , output_word_offsets=lowerCAmelCase_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(lowerCAmelCase_ , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
    def test_word_time_stamp_integration( self ):
import torch
_snake_case = load_dataset('common_voice' , 'en' , split='train' , streaming=lowerCAmelCase_ )
_snake_case = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_6000 ) )
_snake_case = iter(lowerCAmelCase_ )
_snake_case = next(lowerCAmelCase_ )
_snake_case = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
_snake_case = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_snake_case = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
_snake_case = model(lowerCAmelCase_ ).logits.cpu().numpy()
_snake_case = processor.decode(logits[0] , output_word_offsets=lowerCAmelCase_ )
_snake_case = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_snake_case = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
_snake_case = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(lowerCAmelCase_ , 'word' ) ) , lowerCAmelCase_ )
self.assertEqual(' '.join(self.get_from_offsets(lowerCAmelCase_ , 'word' ) ) , output.text )
# output times
_snake_case = torch.tensor(self.get_from_offsets(lowerCAmelCase_ , 'start_time' ) )
_snake_case = torch.tensor(self.get_from_offsets(lowerCAmelCase_ , 'end_time' ) )
# fmt: off
_snake_case = torch.tensor([1.41_99, 1.65_99, 2.25_99, 3.0, 3.24, 3.59_99, 3.79_99, 4.09_99, 4.26, 4.94, 5.28, 5.65_99, 5.78, 5.94, 6.32, 6.53_99, 6.65_99] )
_snake_case = torch.tensor([1.53_99, 1.89_99, 2.9, 3.16, 3.53_99, 3.72, 4.01_99, 4.17_99, 4.76, 5.15_99, 5.55_99, 5.69_99, 5.86, 6.19_99, 6.38, 6.61_99, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=0.01 ) )
self.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=0.01 ) )
| 541 | 1 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule( scheduler , num_steps=10 ):
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule( scheduler , num_steps=10 ):
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , 'schedule.bin' )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
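# `unwrap_and_save_reload_schedule` additionally round-trips the scheduler state dict through
# `torch.save`/`torch.load` halfway through, so any divergence from `unwrap_schedule` points at
# scheduler state that does not serialize correctly.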
@require_torch
class OptimizationTest( unittest.TestCase ):
    def assertListAlmostEqual( self , list1 , list2 , tol ):
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )
    def test_adam_w( self ):
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
        for _ in range(100 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
    def test_adafactor( self ):
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , beta1=None , weight_decay=0.0 , relative_step=False , scale_parameter=False , warmup_init=False , )
        for _ in range(1000 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class ScheduleInitTest( unittest.TestCase ):
    m = nn.Linear(50 , 50 ) if is_torch_available() else None
    optimizer = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual( self , list1 , list2 , tol , msg=None ):
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol , msg=msg )
    def test_schedulers( self ):
        common_kwargs = {'num_warmup_steps': 2, 'num_training_steps': 1_0}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer , **kwargs )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            lrs_1 = unwrap_schedule(scheduler , self.num_steps )
            self.assertListAlmostEqual(
                lrs_1 , expected_learning_rates , tol=1e-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , )
            scheduler = scheduler_func(self.optimizer , **kwargs )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler )  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler , self.num_steps )
            self.assertListEqual(lrs_1 , lrs_2 , msg=f'''failed for {scheduler_func} in save and reload''' )
class LambdaScheduleWrapper:
    """Picklable proxy around an lr lambda; see `wrap_scheduler` usage in the test above."""
    def __init__( self , fn ):
        self.fn = fn
    def __call__( self , *args , **kwargs ):
        return self.fn(*args , **kwargs )
    @classmethod
    def wrap_scheduler( cls , scheduler ):
        scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas ) )
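# Wrapping each lr lambda in a module-level class instance keeps the schedule picklable, which
# the `torch.save`/`torch.load` round trip in `unwrap_and_save_reload_schedule` relies on.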
| 47 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def convert_bort_checkpoint_to_pytorch( bort_checkpoint_path: str , pytorch_dump_folder_path: str ):
    bort_4_8_768_1024_hparams = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 1_0_2_4,
'hidden_size': 7_6_8,
'max_length': 5_1_2,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 1_0_2_4,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1e-5,
'token_type_vocab_size': 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=False , output_all_encodings=False , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , None ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = 'openwebtext_ccnews_stories_books_cased'
# Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir() , 'models' )
    bort_vocab = _load_vocab(vocab_name , None , gluon_cache_dir , cls=Vocab )
    original_bort = nlp.model.BERTModel(
        encoder , len(bort_vocab ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=False , use_token_type_embed=False , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=False , use_decoder=False , )
    original_bort.load_parameters(bort_checkpoint_path , cast_dtype=True , ignore_extra=True )
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
        'vocab_size': len(bort_vocab ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json )
    hf_bort_model = BertForMaskedLM(hf_bort_config )
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param , gluon_param ):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param] )
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
        return gluon_param
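    # Every mapping below follows the same pattern: the HF parameter is replaced in place by the
    # shape-checked Gluon tensor, i.e. `hf_param = check_and_map_params(hf_param, gluon_name)`.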
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained('roberta-base' )
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT )['input_ids']
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids] )
    output_gluon = original_bort(inputs=gluon_input_ids , token_types=[] )
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path )
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path )
    hf_bort_model.eval()
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT , return_tensors='pt' )
    output_hf = hf_bort_model(**input_ids )[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    success = np.allclose(gluon_layer , hf_layer , atol=1e-3 )
    if success:
        print('✔️ Both models output the same tensors' )
    else:
        print('❌ The models do **NOT** output the same tensors' )
        print('Absolute difference is:' , max_absolute_diff )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 47 | 1 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
modified_files = (
    subprocess.check_output(F"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode('''utf-8''').split()
)
joined_dirs = '''|'''.join(sys.argv[1:])
regex = re.compile(rF"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''')
| 404 |
"""simple docstring"""
def print_pascal_triangle( num_rows ) -> None:
    triangle = generate_pascal_triangle(num_rows )
    for row_idx in range(num_rows ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=''' ''' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=''' ''' )
else:
print(triangle[row_idx][col_idx] , end='''''' )
print()
def generate_pascal_triangle( num_rows ) -> list[list[int]]:
    if not isinstance(num_rows , int ):
        raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            '''The input value of \'num_rows\' should be greater than or equal to 0''' )
    triangle = []
    for current_row_idx in range(num_rows ):
        current_row = populate_current_row(triangle , current_row_idx )
        triangle.append(current_row )
    return triangle
def populate_current_row( triangle , current_row_idx ) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1 , current_row_idx ):
        calculate_current_element(
            triangle , current_row , current_row_idx , current_col_idx )
    return current_row
def calculate_current_element( triangle , current_row , current_row_idx , current_col_idx , ) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized( num_rows ) -> list[list[int]]:
    if not isinstance(num_rows , int ):
        raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            '''The input value of \'num_rows\' should be greater than or equal to 0''' )
    result = [[1]]
    for row_index in range(1 , num_rows ):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length , 2 ) )
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row )
    return result
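# The optimized variant above exploits the left-right symmetry of Pascal's triangle: only the
# first half of each row is computed with pairwise sums and then mirrored, roughly halving the
# additions per row.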
def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable , value: int ) -> None:
        call = f"""{func.__name__}({value})"""
        timing = timeit(f"""__main__.{call}""" , setup='''import __main__''' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f"""{call:38} -- {timing:.4f} seconds""" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func , value )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 404 | 1 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = '.'
if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, 'utils/documentation_tests.txt')
    non_existent_paths = []
    all_paths = []
with open(doctest_file_path) as fp:
for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
        non_existent_paths = '\n'.join(non_existent_paths)
raise ValueError(f'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''')
if all_paths != sorted(all_paths):
raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
| 206 |
import warnings
warnings.warn(
    'memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '
'`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
| 206 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""caidas/swin2sr-classicalsr-x2-64""": (
"""https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"""
),
}
class Swin2SRConfig(PretrainedConfig ):
    model_type = 'swin2sr'
    attribute_map = {
        'hidden_size': 'embed_dim',
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__( self , image_size=64 , patch_size=1 , num_channels=3 , embed_dim=180 , depths=[6, 6, 6, 6, 6, 6] , num_heads=[6, 6, 6, 6, 6, 6] , window_size=8 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , upscale=2 , img_range=1.0 , resi_connection="1conv" , upsampler="pixelshuffle" , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
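# A minimal usage sketch (values are illustrative): `Swin2SRConfig(upscale=4)` configures a 4x
# super-resolution model, while `embed_dim`, `depths` and `num_heads` shape the Swin v2 backbone.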
| 672 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
a : Union[str, Any] = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
a : int = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
a : int = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
    def _get_feature_types( self ):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
    def _compute( self , predictions , references , sample_weight=None , multioutput="uniform_average" , squared=True ):
        mse = mean_squared_error(
            references , predictions , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
        return {"mse": mse}
| 672 | 1 |
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
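# A minimal, non-interactive sketch (added for illustration, not part of the
# original script) that exercises `floyd_warshall` above on a hard-coded
# 3-vertex graph.
def _demo_floyd_warshall():
    inf = float("inf")
    sample_graph = [
        [0.0, 3.0, inf],
        [inf, 0.0, 1.0],
        [2.0, inf, 0.0],
    ]
    dist, _ = floyd_warshall(sample_graph, 3)
    assert dist[0][2] == 4.0  # shortest route 0 -> 1 -> 2
    assert dist[2][1] == 5.0  # shortest route 2 -> 0 -> 1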
| 21 |
'''simple docstring'''
import os
def solution(filename: str = "matrix.txt") -> int:
    """Return the minimal path sum from the top-left to the bottom-right of the
    grid in `filename`, moving only right and down, via dynamic programming."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for i in range(n)] for j in range(n)]
    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]
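
# Hedged worked example (added for illustration): the same recurrence on a tiny
# hard-coded grid. For [[1, 3], [2, 1]] the cheapest right/down path is
# 1 -> 2 -> 1, so the minimal path sum is 4.
def _demo_min_path_sum() -> int:
    grid = [[1, 3], [2, 1]]
    dp = [[0, 0], [0, 0]]
    dp[0][0] = grid[0][0]             # 1
    dp[0][1] = grid[0][1] + dp[0][0]  # 3 + 1 = 4
    dp[1][0] = grid[1][0] + dp[0][0]  # 2 + 1 = 3
    dp[1][1] = grid[1][1] + min(dp[0][1], dp[1][0])  # 1 + 3 = 4
    return dp[1][1]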
if __name__ == "__main__":
    print(f"""{solution() = }""")
 | 448 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    """Unconditional image generation with the score-based SDE (VE) sampler."""

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
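
# Hedged usage sketch (added for illustration; the checkpoint id is an
# assumption, not taken from this file):
#
#   from diffusers import ScoreSdeVePipeline
#
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(num_inference_steps=2000).images[0]
#   image.save("sde_ve_sample.png")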
| 703 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    # `task` identifies the template; the schema fields are class-level constants.
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
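
# Hedged usage sketch (added for illustration): remapping a custom dataset
# column onto the template's canonical "text" column.
template = LanguageModeling(text_column="content")
assert template.column_mapping == {"content": "text"}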
| 528 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( __a , unittest.TestCase):
__a : Optional[int] = KandinskyInpaintPipeline
__a : Optional[int] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
__a : str = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
__a : int = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
__a : int = False
@property
def __snake_case ( self ) -> Union[str, Any]:
'''simple docstring'''
return 32
@property
def __snake_case ( self ) -> Tuple:
'''simple docstring'''
return 32
@property
def __snake_case ( self ) -> List[str]:
'''simple docstring'''
return self.time_input_dim
@property
def __snake_case ( self ) -> Optional[int]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def __snake_case ( self ) -> Union[str, Any]:
'''simple docstring'''
return 1_00
@property
def __snake_case ( self ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : str = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def __snake_case ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase : Dict = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
_UpperCAmelCase : Optional[Any] = MultilingualCLIP(_A )
_UpperCAmelCase : Optional[Any] = text_encoder.eval()
return text_encoder
@property
def __snake_case ( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase : List[Any] = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_UpperCAmelCase : List[Any] = UNetaDConditionModel(**_A )
return model
@property
def __snake_case ( self ) -> Optional[Any]:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __snake_case ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def __snake_case ( self ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : int = self.dummy_text_encoder
_UpperCAmelCase : List[str] = self.dummy_tokenizer
_UpperCAmelCase : Dict = self.dummy_unet
_UpperCAmelCase : List[str] = self.dummy_movq
_UpperCAmelCase : Any = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.00085 , beta_end=0.012 , clip_sample=_A , set_alpha_to_one=_A , steps_offset=1 , prediction_type="""epsilon""" , thresholding=_A , )
_UpperCAmelCase : int = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def __snake_case ( self , _A , _A=0 ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_A ) ).to(_A )
_UpperCAmelCase : Optional[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_A )
# create init_image
_UpperCAmelCase : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(_A ) ).to(_A )
_UpperCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase : int = Image.fromarray(np.uinta(_A ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create mask
_UpperCAmelCase : Optional[Any] = np.ones((64, 64) , dtype=np.floataa )
_UpperCAmelCase : List[str] = 0
if str(_A ).startswith("""mps""" ):
_UpperCAmelCase : int = torch.manual_seed(_A )
else:
_UpperCAmelCase : Optional[int] = torch.Generator(device=_A ).manual_seed(_A )
_UpperCAmelCase : Optional[int] = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def __snake_case ( self ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = """cpu"""
_UpperCAmelCase : Dict = self.get_dummy_components()
_UpperCAmelCase : Optional[int] = self.pipeline_class(**_A )
_UpperCAmelCase : List[str] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCAmelCase : Optional[Any] = pipe(**self.get_dummy_inputs(_A ) )
_UpperCAmelCase : List[Any] = output.images
_UpperCAmelCase : int = pipe(
**self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
_UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
_UpperCAmelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
print(f'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase : List[str] = np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def __snake_case ( self ) -> Dict:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase):
def __snake_case ( self ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self ) -> str:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
_UpperCAmelCase : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_UpperCAmelCase : Any = np.ones((7_68, 7_68) , dtype=np.floataa )
_UpperCAmelCase : List[str] = 0
_UpperCAmelCase : Tuple = """a hat"""
_UpperCAmelCase : int = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(_A )
_UpperCAmelCase : Optional[Any] = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
_UpperCAmelCase : Optional[int] = pipeline.to(_A )
pipeline.set_progress_bar_config(disable=_A )
_UpperCAmelCase : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
_UpperCAmelCase , _UpperCAmelCase : Tuple = pipe_prior(
_A , generator=_A , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_UpperCAmelCase : List[Any] = pipeline(
_A , image=_A , mask_image=_A , image_embeds=_A , negative_image_embeds=_A , generator=_A , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="""np""" , )
_UpperCAmelCase : Optional[Any] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(_A , _A )
| 238 |
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
'''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'''
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Search Google Images for `query`, download up to `max_images` full-size
    results into a `query_<term>` folder, and return the number saved."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        # The URLs come double-escaped inside the page's JSON payload.
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape"
        )
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape"
        )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index
if __name__ == "__main__":
try:
lowerCamelCase__ : List[str] = download_images_from_google_query(sys.argv[1])
print(F'''{image_count} images were downloaded to disk.''')
except IndexError:
print('''Please provide a search term.''')
raise
| 238 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
    "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 404 |
"""simple docstring"""
def solution() -> str:
    """Return the last ten digits of the series 1^1 + 2^2 + ... + 1000^1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
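
# Hedged sketch (added for illustration): the same answer without materialising
# the huge integers, by computing every self-power modulo 10**10.
def _solution_mod() -> str:
    modulus = 10**10
    total = sum(pow(i, i, modulus) for i in range(1, 1001)) % modulus
    return str(total).zfill(10)  # zfill guards against a leading zero in the tail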
| 404 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'width_multiplier' ) )
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_="swish" , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=0.25 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , ) -> int:
'''simple docstring'''
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = make_divisible(512 * width_multiplier , divisor=8 )
lowerCamelCase_ = hidden_act
lowerCamelCase_ = conv_kernel_size
lowerCamelCase_ = output_stride
lowerCamelCase_ = classifier_dropout_prob
lowerCamelCase_ = use_labels
lowerCamelCase_ = is_training
lowerCamelCase_ = num_labels
lowerCamelCase_ = initializer_range
lowerCamelCase_ = scope
lowerCamelCase_ = width_multiplier
lowerCamelCase_ = ffn_dropout
lowerCamelCase_ = attn_dropout
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
lowerCamelCase_ = MobileViTVaModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = MobileViTVaForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = MobileViTVaForSemanticSegmentation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ = (
{
'feature-extraction': MobileViTVaModel,
'image-classification': MobileViTVaForImageClassification,
'image-segmentation': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = MobileViTVaModelTester(self )
lowerCamelCase_ = MobileViTVaConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViTV2 does not use inputs_embeds' )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason='MobileViTV2 does not support input and output embeddings' )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='MobileViTV2 does not output attentions' )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
pass
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = outputs.hidden_states
lowerCamelCase_ = 5
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
lowerCamelCase_ = 2
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = MobileViTVaModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def _UpperCamelCase ( ) -> Any:
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' )
if is_vision_available()
else None
)
@slow
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to(
SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
lowerCamelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
@slow
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
lowerCamelCase_ = model.to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = outputs.logits
# verify the logits
lowerCamelCase_ = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.tensor(
[
[[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
[[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
[[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
] , device=SCREAMING_SNAKE_CASE_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
@slow
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
lowerCamelCase_ = model.to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = outputs.logits.detach().cpu()
lowerCamelCase_ = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_ , target_sizes=[(50, 60)] )
lowerCamelCase_ = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE_ )
| 42 |
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _A ( __lowercase , __lowercase=None ):
"""simple docstring"""
lowerCamelCase__ = None
if token is not None:
lowerCamelCase__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"""Bearer {token}"""}
lowerCamelCase__ = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
lowerCamelCase__ = requests.get(__lowercase , headers=__lowercase ).json()
lowerCamelCase__ = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
lowerCamelCase__ = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(__lowercase ):
lowerCamelCase__ = requests.get(url + f"""&page={i + 2}""" , headers=__lowercase ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _A ( __lowercase , __lowercase=None ):
"""simple docstring"""
lowerCamelCase__ = None
if token is not None:
lowerCamelCase__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"""Bearer {token}"""}
lowerCamelCase__ = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
lowerCamelCase__ = requests.get(__lowercase , headers=__lowercase ).json()
lowerCamelCase__ = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
lowerCamelCase__ = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(__lowercase ):
lowerCamelCase__ = requests.get(url + f"""&page={i + 2}""" , headers=__lowercase ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _A ( __lowercase , __lowercase , __lowercase , __lowercase ):
"""simple docstring"""
lowerCamelCase__ = None
if token is not None:
lowerCamelCase__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"""Bearer {token}"""}
lowerCamelCase__ = requests.get(__lowercase , headers=__lowercase , allow_redirects=__lowercase )
lowerCamelCase__ = result.headers["""Location"""]
lowerCamelCase__ = requests.get(__lowercase , allow_redirects=__lowercase )
lowerCamelCase__ = os.path.join(__lowercase , f"""{artifact_name}.zip""" )
with open(__lowercase , """wb""" ) as fp:
fp.write(response.content )
def _A ( __lowercase , __lowercase=None ):
"""simple docstring"""
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = None
with zipfile.ZipFile(__lowercase ) as z:
for filename in z.namelist():
if not os.path.isdir(__lowercase ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__lowercase ) as f:
for line in f:
lowerCamelCase__ = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
lowerCamelCase__ = line[: line.index(""": """ )]
lowerCamelCase__ = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
lowerCamelCase__ = line[len("""FAILED """ ) :]
failed_tests.append(__lowercase )
elif filename == "job_name.txt":
lowerCamelCase__ = line
if len(__lowercase ) != len(__lowercase ):
raise ValueError(
f"""`errors` and `failed_tests` should have the same number of elements. Got {len(__lowercase )} for `errors` """
f"""and {len(__lowercase )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
""" problem.""" )
lowerCamelCase__ = None
if job_name and job_links:
lowerCamelCase__ = job_links.get(__lowercase , __lowercase )
# A list with elements of the form (line of error, error, failed test)
lowerCamelCase__ = [x + [y] + [job_link] for x, y in zip(__lowercase , __lowercase )]
return result
def _A ( __lowercase , __lowercase=None ):
"""simple docstring"""
lowerCamelCase__ = []
lowerCamelCase__ = [os.path.join(__lowercase , __lowercase ) for p in os.listdir(__lowercase ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__lowercase , job_links=__lowercase ) )
return errors
def _A ( __lowercase , __lowercase=None ):
"""simple docstring"""
lowerCamelCase__ = Counter()
counter.update([x[1] for x in logs] )
lowerCamelCase__ = counter.most_common()
lowerCamelCase__ = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
lowerCamelCase__ = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
lowerCamelCase__ = dict(sorted(r.items() , key=lambda __lowercase : item[1]["count"] , reverse=__lowercase ) )
return r
def _A ( __lowercase ):
"""simple docstring"""
lowerCamelCase__ = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
lowerCamelCase__ = test.split("""/""" )[2]
else:
lowerCamelCase__ = None
return test
def _A ( __lowercase , __lowercase=None ):
"""simple docstring"""
lowerCamelCase__ = [(x[0], x[1], get_model(x[2] )) for x in logs]
lowerCamelCase__ = [x for x in logs if x[2] is not None]
lowerCamelCase__ = {x[2] for x in logs}
lowerCamelCase__ = {}
for test in tests:
lowerCamelCase__ = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
lowerCamelCase__ = counter.most_common()
lowerCamelCase__ = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
lowerCamelCase__ = sum(error_counts.values() )
if n_errors > 0:
lowerCamelCase__ = {"""count""": n_errors, """errors""": error_counts}
lowerCamelCase__ = dict(sorted(r.items() , key=lambda __lowercase : item[1]["count"] , reverse=__lowercase ) )
return r
def _A ( __lowercase ):
"""simple docstring"""
lowerCamelCase__ = """| no. | error | status |"""
lowerCamelCase__ = """|-:|:-|:-|"""
lowerCamelCase__ = [header, sep]
for error in reduced_by_error:
lowerCamelCase__ = reduced_by_error[error]["""count"""]
lowerCamelCase__ = f"""| {count} | {error[:100]} | |"""
lines.append(__lowercase )
return "\n".join(__lowercase )
def _A ( __lowercase ):
"""simple docstring"""
lowerCamelCase__ = """| model | no. of errors | major error | count |"""
lowerCamelCase__ = """|-:|-:|-:|-:|"""
lowerCamelCase__ = [header, sep]
for model in reduced_by_model:
lowerCamelCase__ = reduced_by_model[model]["""count"""]
lowerCamelCase__ , lowerCamelCase__ = list(reduced_by_model[model]["""errors"""].items() )[0]
lowerCamelCase__ = f"""| {model} | {count} | {error[:60]} | {_count} |"""
lines.append(__lowercase )
return "\n".join(__lowercase )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
__magic_name__ = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
__magic_name__ = get_job_links(args.workflow_run_id, token=args.token)
__magic_name__ = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
__magic_name__ = k.find(""" / """)
__magic_name__ = k[index + len(""" / """) :]
__magic_name__ = v
with open(os.path.join(args.output_dir, """job_links.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
__magic_name__ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
__magic_name__ = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
__magic_name__ = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
__magic_name__ = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, """errors.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
__magic_name__ = reduce_by_error(errors)
__magic_name__ = reduce_by_model(errors)
__magic_name__ = make_github_table(reduced_by_error)
__magic_name__ = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, """reduced_by_error.txt"""), """w""", encoding="""UTF-8""") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, """reduced_by_model.txt"""), """w""", encoding="""UTF-8""") as fp:
fp.write(sa)
| 129 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
    'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
    _import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_perceiver'] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 406 |
"""simple docstring"""
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
snake_case = '▁'
snake_case = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class UpperCamelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : int = BertGenerationTokenizer
UpperCAmelCase_ : List[str] = False
UpperCAmelCase_ : str = True
def A ( self ) -> int:
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE = BertGenerationTokenizer(lowercase__ , keep_accents=lowercase__ )
tokenizer.save_pretrained(self.tmpdirname )
def A ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = '<s>'
SCREAMING_SNAKE_CASE = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ )
def A ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(lowercase__ ) , 1002 )
def A ( self ) -> Any:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def A ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = BertGenerationTokenizer(lowercase__ , keep_accents=lowercase__ )
SCREAMING_SNAKE_CASE = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowercase__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase__ ) , [285, 46, 10, 170, 382] , )
SCREAMING_SNAKE_CASE = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(lowercase__ )
self.assertListEqual(
lowercase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(lowercase__ )
self.assertListEqual(
lowercase__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def A ( self ) -> Any:
"""simple docstring"""
return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
@slow
def A ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'Hello World!'
SCREAMING_SNAKE_CASE = [18536, 2260, 101]
self.assertListEqual(lowercase__ , self.big_tokenizer.encode(lowercase__ ) )
@slow
def A ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
SCREAMING_SNAKE_CASE = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
self.assertListEqual(lowercase__ , self.big_tokenizer.encode(lowercase__ ) )
@require_torch
@slow
def A ( self ) -> int:
"""simple docstring"""
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
SCREAMING_SNAKE_CASE = list(self.big_tokenizer.get_vocab().keys() )[:10]
SCREAMING_SNAKE_CASE = ' '.join(lowercase__ )
SCREAMING_SNAKE_CASE = self.big_tokenizer.encode_plus(lowercase__ , return_tensors='pt' , return_token_type_ids=lowercase__ )
SCREAMING_SNAKE_CASE = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=lowercase__ )
SCREAMING_SNAKE_CASE = BertGenerationConfig()
SCREAMING_SNAKE_CASE = BertGenerationEncoder(lowercase__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowercase__ )
model(**lowercase__ )
@slow
def A ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase__ , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
| 406 | 1 |
"""simple docstring"""
import requests
from bs4 import BeautifulSoup


def world_covidaa_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape the headline counters and panel statistics from worldometers."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
for key, value in world_covidaa_stats().items():
print(F"{key}\n{value}\n")
| 361 |
"""simple docstring"""
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass


@input.register
class BulletMenu:
    """A simple terminal menu driven by the arrow and number keys."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index: int, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
| 361 | 1 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError('The nodes number should be same as the number of coins')

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
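    # Example: a root holding 3 coins with two empty leaf children needs 2 moves:
    # distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0))) == 2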
| 669 |
def base16_encode(data: bytes) -> str:
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])
def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            'Base16 encoded data is invalid:\nData does not have an even number of hex digits.')
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set('0123456789ABCDEF'):
        raise ValueError(
            'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.')
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
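    # Example round trip:
    # base16_encode(b"Hello World!") == "48656C6C6F20576F726C6421"
    # base16_decode("48656C6C6F20576F726C6421") == b"Hello World!"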
| 669 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
@slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]], dtype=tf.int32
        )  # J'aime le camembert !
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 1_0, 7_6_8))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]], dtype=tf.float32
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1E-4))

 | 334 |
'''simple docstring'''
values = {
0: '0',
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6',
7: '7',
8: '8',
9: '9',
10: 'a',
11: 'b',
12: 'c',
13: 'd',
14: 'e',
15: 'f',
}
def decimal_to_hexadecimal(decimal: float) -> str:
    """Convert a whole-valued decimal number to its hexadecimal representation.

    >>> decimal_to_hexadecimal(5)
    '0x5'
    >>> decimal_to_hexadecimal(-256)
    '-0x100'
    """
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
    doctest.testmod()

 | 334 | 1 |
def sylvester(number: int) -> int:
    """Return the `number`-th term of Sylvester's sequence: 2, 3, 7, 43, 1807, ...

    >>> sylvester(4)
    43
    """
    assert isinstance(number, int), f'The input value of [n={number}] is not an integer'
    if number == 1:
        return 2
    elif number < 1:
        msg = f'The input value of [n={number}] has to be > 0'
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F"""The 8th number in Sylvester\'s sequence: {sylvester(8)}""")
| 719 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple('covid_data', 'cases deaths recovered')
def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    """Scrape total cases, deaths and recoveries from Worldometer."""
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))
fmt = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
| 75 | 0 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
@property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
'sep': self.sep,
'header': self.header,
'names': self.names,
'index_col': self.index_col,
'usecols': self.usecols,
'prefix': self.prefix,
'mangle_dupe_cols': self.mangle_dupe_cols,
'engine': self.engine,
'converters': self.converters,
'true_values': self.true_values,
'false_values': self.false_values,
'skipinitialspace': self.skipinitialspace,
'skiprows': self.skiprows,
'nrows': self.nrows,
'na_values': self.na_values,
'keep_default_na': self.keep_default_na,
'na_filter': self.na_filter,
'verbose': self.verbose,
'skip_blank_lines': self.skip_blank_lines,
'thousands': self.thousands,
'decimal': self.decimal,
'lineterminator': self.lineterminator,
'quotechar': self.quotechar,
'quoting': self.quoting,
'escapechar': self.escapechar,
'comment': self.comment,
'encoding': self.encoding,
'dialect': self.dialect,
'error_bad_lines': self.error_bad_lines,
'warn_bad_lines': self.warn_bad_lines,
'skipfooter': self.skipfooter,
'doublequote': self.doublequote,
'memory_map': self.memory_map,
'float_precision': self.float_precision,
'chunksize': self.chunksize,
'encoding_errors': self.encoding_errors,
'on_bad_lines': self.on_bad_lines,
'date_format': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files}))
        return splits
    def _cast_table(self, pa_table):
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
| 28 |
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    """Feature extraction pipeline using no model head: returns the hidden states of the base model."""

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)')
            tokenize_kwargs['truncation'] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params['return_tensors'] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs):
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
| 28 | 1 |
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(from_pole: str, to_pole: str) -> None:
    print('moving disk from', from_pole, 'to', to_pole)


def main() -> None:
    height = int(input('Height of hanoi: ').strip())
    move_tower(height, 'A', 'B', 'C')
if __name__ == "__main__":
main()
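    # For "Height of hanoi: 2" the program prints:
    #   moving disk from A to C
    #   moving disk from A to B
    #   moving disk from C to B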
| 526 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'])

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs))

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
                metric.add_batch(
                    predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.')
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 526 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"""openmmlab/upernet-convnext-tiny""",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
a_ = """UperNetConfig"""
class UperNetConvModule(nn.Module):
    """A conv block that bundles conv/norm/activation layers."""

    def __init__(self, in_channels, out_channels, kernel_size, padding=0, bias=False, dilation=1):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input):
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
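# Pools the input to one fixed spatial scale, then projects the pooled channels with a
# 1x1 UperNetConvModule.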
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale, in_channels, channels):
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input):
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    def __init__(self, pool_scales, in_channels, channels, align_corners):
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x):
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode='bilinear', align_corners=self.align_corners)
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
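# Decode head for Unified Perceptual Parsing: fuses the pyramid pooling output with a
# feature pyramid network over the backbone feature maps and classifies every pixel.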
class UperNetHead(nn.Module):
    def __init__(self, config, in_channels):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales, self.in_channels[-1], self.channels, align_corners=self.align_corners)
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels, self.channels, kernel_size=3, padding=1)
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels, self.channels, kernel_size=3, padding=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states):
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode='bilinear', align_corners=self.align_corners)

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode='bilinear', align_corners=self.align_corners)
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)
        return output
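# Auxiliary FCN-style head applied to an intermediate backbone feature map; it only adds
# an extra training signal, weighted by `config.auxiliary_loss_weight`.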
class UperNetFCNHead(nn.Module):
    def __init__(self, config, in_index=2, kernel_size=3, dilation=1):
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation))
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation))
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2)

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states):
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = R"""
Parameters:
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
UPERNET_INPUTS_DOCSTRING = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""", UPERNET_START_DOCSTRING)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values=None, output_attentions=None, output_hidden_states=None, labels=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions)
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode='bilinear', align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode='bilinear', align_corners=False)

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError('The number of labels should be greater than one')
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
| 175 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
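# Tester that builds a tiny UMT5 configuration plus matching encoder/decoder inputs for
# the shared model tests below.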
class UMTaModelTester:
    def __init__(self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
'''simple docstring'''
return TaConfig.from_pretrained('''google/umt5-base''' )
    def prepare_inputs_dict(self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
'''simple docstring'''
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def get_config(self):
'''simple docstring'''
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def create_and_check_model(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels):
        model = UMTaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask)
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels):
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)['last_hidden_state']
        output_from_past = model(next_tokens, past_key_values=past_key_values)['last_hidden_state']

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1E-3))
    def create_and_check_model_fp16_forward(self, config, input_dict):
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)['last_hidden_state']
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMTaForConditionalGeneration,
            "feature-extraction": UMTaModel,
            "summarization": UMTaForConditionalGeneration,
            "text2text-generation": UMTaForConditionalGeneration,
            "translation": UMTaForConditionalGeneration,
            "question-answering": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMTaModelTester(self)
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f"{tmpdirname}/t5_test.onnx", export_params=True, opset_version=9, input_names=['input_ids', 'decoder_input_ids'])
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
    def test_headmasking(self):
        attention_names = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)
        head_masking = {
            'head_mask': torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            'decoder_head_mask': torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            'cross_attn_head_mask': torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks['decoder_head_mask'] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device)

            out = model.generate(
                config_and_inputs[1]['input_ids'], num_beams=1, max_length=3, output_attentions=True, return_dict_in_generate=True, **head_masks)
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaIntegrationTest(unittest.TestCase):
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
    def test_small_integration_test(self):
        model = UMTaForConditionalGeneration.from_pretrained('google/umt5-small', return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('google/umt5-small', use_fast=False, legacy=False)
        input_text = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
        input_ids = tokenizer(input_text, return_tensors='pt', padding=True).input_ids
# fmt: off
        EXPECTED_IDS = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 175 | 1 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
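# Conversion flow: build a VideoMAE config from the checkpoint name, rename the original
# state-dict keys to the Hugging Face layout, then verify logits on a sample video.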
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError("Model name should include either \"small\", \"base\", \"large\", or \"huge\"")
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            # split the fused qkv projection into separate query/key/value weights
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset")
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=True)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits
    model_names = [
"videomae-small-finetuned-kinetics",
"videomae-small-finetuned-ssv2",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"videomae-base-short",
"videomae-base-short-finetuned-kinetics",
"videomae-base",
"videomae-base-finetuned-kinetics",
"videomae-large",
"videomae-large-finetuned-kinetics",
"videomae-huge-finetuned-kinetics",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"videomae-base-short-ssv2",
"videomae-base-short-finetuned-ssv2",
"videomae-base-ssv2",
"videomae-base-finetuned-ssv2",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
lowerCAmelCase_ = torch.Size([1, 400] )
lowerCAmelCase_ = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
lowerCAmelCase_ = torch.Size([1, 174] )
lowerCAmelCase_ = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
lowerCAmelCase_ = torch.Size([1, 1408, 1536] )
lowerCAmelCase_ = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
lowerCAmelCase_ = torch.Size([1, 1408, 1536] )
lowerCAmelCase_ = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
lowerCAmelCase_ = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
lowerCAmelCase_ = torch.Size([1, 1408, 1536] )
lowerCAmelCase_ = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
lowerCAmelCase_ = torch.Size([1, 400] )
lowerCAmelCase_ = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
lowerCAmelCase_ = torch.Size([1, 400] )
lowerCAmelCase_ = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
lowerCAmelCase_ = torch.Size([1, 400] )
lowerCAmelCase_ = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
lowerCAmelCase_ = torch.Size([1, 400] )
lowerCAmelCase_ = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
lowerCAmelCase_ = torch.Size([1, 1408, 1536] )
lowerCAmelCase_ = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
lowerCAmelCase_ = torch.Size([1, 174] )
lowerCAmelCase_ = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
lowerCAmelCase_ = torch.Size([1, 1408, 1536] )
lowerCAmelCase_ = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
lowerCAmelCase_ = torch.Size([1, 174] )
lowerCAmelCase_ = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(F"Model name not supported. Should be one of {model_names}" )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1E-4)
else:
print("Logits:" , logits[0, :3, :3] )
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1E-4)
print("Logits ok!" )
# verify loss, if applicable
if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1E-4)
print("Loss ok!" )
if pytorch_dump_folder_path is not None:
print(F"Saving model and image processor to {pytorch_dump_folder_path}" )
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print("Pushing to the hub..." )
model.push_to_hub(__a , organization="nielsr" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4''',
type=str,
help=(
'''URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'''
''' download link.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/Users/nielsrogge/Documents/VideoMAE/Test''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--model_name''', default='''videomae-base''', type=str, help='''Name of the model.''')
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
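# Hypothetical invocation of the conversion script above (script filename, paths and model name
# are placeholders, not from the original file):
#   python convert_videomae_to_pytorch.py \
#       --checkpoint_url "<direct Google Drive download link>" \
#       --pytorch_dump_folder_path ./videomae-base \
#       --model_name videomae-base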
| 701 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):  # class name assumed; base grounded by the import above
    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self , _a = "" , _a = None , _a = None , **_a ) -> Any:
super().__init__(self , **_a )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowerCAmelCase_ = fsspec.open(
_a , mode="rb" , protocol=_a , compression=self.compression , client_kwargs={
"requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
"trust_env": True, # Enable reading proxy env variables.
**(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
lowerCAmelCase_ = os.path.basename(self.file.path.split("::" )[0] )
lowerCAmelCase_ = (
self.compressed_name[: self.compressed_name.rindex("." )]
if "." in self.compressed_name
else self.compressed_name
)
lowerCAmelCase_ = None
    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")
    def _get_dirs(self):  # populates the directory cache; method name assumed from the upstream implementation
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}
    def cat(self, path: str):  # method name assumed
        return self.file.open().read()
    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):  # subclass names assumed; attributes grounded by the base class
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"
class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"
class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"
class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"
class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
def __init__( self , _a , _a = "rb" , _a = None , _a = None , _a = DEFAULT_BLOCK_SIZE , **_a , ) -> Tuple:
super().__init__(
fo=_a , mode=_a , target_protocol=_a , target_options=_a , block_size=_a , **_a , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
lowerCAmelCase_ = self.file.__enter__
class __magic_name__ :
def __init__( self , _a ) -> List[str]:
lowerCAmelCase_ = file_
def __enter__( self ) -> int:
self._file.__enter__()
return self
def __exit__( self , *_a , **_a ) -> Dict:
self._file.__exit__(*_a , **_a )
def __iter__( self ) -> List[Any]:
return iter(self._file )
            def __next__(self):
                return next(self._file)
def __getattr__( self , _a ) -> Tuple:
return getattr(self._file , _a )
def fixed_enter(*_a , **_a ):
return WrappedFile(_enter(*_a , **_a ) )
        self.file.__enter__ = fixed_enter
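# Minimal usage sketch for the compressed-file filesystems above (assumes they are registered
# with fsspec under their `protocol` names, as the chained-URL form in the class comment shows;
# the URL is a placeholder):
#
#   import fsspec
#   with fsspec.open("gzip://file.txt::https://example.com/file.txt.gz", mode="rb") as f:
#       data = f.read()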
| 226 | 0 |
"""simple docstring"""
from torch import nn
def UpperCAmelCase(act_fn: str) -> nn.Module:
    """Map an activation-function name to the corresponding torch.nn module."""
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F"""Unsupported activation function: {act_fn}""" ) | 552 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):  # function name grounded by the call at the bottom of this file
    """simple docstring"""
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.")
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)  # dtype assumed (the original literal was garbled)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model')
parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model')
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
| 552 | 1 |
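# Hypothetical command line for the TF -> PyTorch converter above (script filename and paths are
# placeholders; the flags are the ones declared in its argparse setup):
#   python convert_gptsan_tf_checkpoint_to_pytorch.py --tf_model_dir ./gptsan_ckpt --output gptsan.pt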
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True)
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False)
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False)
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())
    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, multi_process=False)
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 664 |
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for i in my_list:
        buckets[int(i - min_value)].append(i)
    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
    from doctest import testmod
    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 664 | 1 |
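# Complexity note for the bucket sort above (added): with n elements spread over
# k = max - min + 1 buckets, the sort runs in roughly O(n + k) expected time for uniformly
# distributed inputs, e.g. bucket_sort([5, 3, 4]) == [3, 4, 5].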
"""simple docstring"""
from math import isqrt, loga
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->list[int]:
_lowerCamelCase : Dict = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_lowerCamelCase : List[Any] = False
return [i for i in range(2 , SCREAMING_SNAKE_CASE_ ) if is_prime[i]]
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ = 80_0800 , SCREAMING_SNAKE_CASE_ = 80_0800 ) ->int:
_lowerCamelCase : Tuple = degree * loga(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : Tuple = int(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : Optional[Any] = calculate_prime_numbers(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Dict = 0
_lowerCamelCase : List[Any] = len(SCREAMING_SNAKE_CASE_ ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 434 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
    'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
    _import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_perceiver'] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
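# Usage sketch (added): with the lazy module installed in sys.modules, a statement like
# `from <this_package> import PerceiverConfig` only triggers the import of
# `configuration_perceiver` on first attribute access. (Package name is a placeholder.)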
| 434 | 1 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
lowercase = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key(k: str) -> str:
    '''simple docstring'''
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    '''simple docstring'''
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})')
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f'{new_k}, {k}, {v.shape}, {sd[new_k].shape}'
    # make sure embedding.padding_idx is respected
    mapping['shared.weight'][cfg.pad_token_id] = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1])
    mapping['encoder.embed_tokens.weight'] = mapping['shared.weight']
    mapping['decoder.embed_tokens.weight'] = mapping['shared.weight']
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith('bias') and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
    ]
    assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
    assert extra == [], f'no matches found for the following tf keys {extra}'
    return torch_model
def UpperCAmelCase ( A : Optional[Any]="./ckpt/aeslc/model.ckpt-32000" ):
'''simple docstring'''
_UpperCAmelCase = tf.train.list_variables(A )
_UpperCAmelCase = {}
_UpperCAmelCase = ['Adafactor', 'global_step']
for name, shape in tqdm(A , desc='converting tf checkpoint to dict' ):
_UpperCAmelCase = any(pat in name for pat in ignore_name )
if skip_key:
continue
_UpperCAmelCase = tf.train.load_variable(A , A )
_UpperCAmelCase = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    '''simple docstring'''
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f'summarization_{dataset}']['max_position_embeddings']
    tok = PegasusTokenizer.from_pretrained('sshleifer/pegasus', model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f'summarization_{dataset}']
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop('model.decoder.embed_positions.weight')
    sd.pop('model.encoder.embed_positions.weight')
    torch.save(sd, Path(save_dir) / 'pytorch_model.bin')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
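# Hypothetical invocation of the Pegasus converter above (script filename and paths are
# placeholders; the positional arguments match the argparse setup):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus-aeslc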
| 24 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):  # class name reconstructed; base grounded by the PretrainedConfig import
    '''simple docstring'''
    model_type = 'beit'
    def __init__(self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):  # class name reconstructed; base grounded by the OnnxConfig import
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('1.11')
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
    @property
    def atol_for_validation(self) -> float:
        return 1E-4
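# Usage sketch for the config classes above (a minimal example; class names as reconstructed
# in this file, values illustrative):
#   config = BeitConfig(image_size=384, patch_size=16)
#   onnx_config = BeitOnnxConfig(config)
#   assert "pixel_values" in onnx_config.inputs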
| 24 | 1 |
"""simple docstring"""
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class _UpperCAmelCase(_lowerCAmelCase):
    # to overwrite at feature extractor specific tests
    feature_extraction_class = None
    feat_extract_tester = None
@property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()
    def test_feat_extract_common_properties(self):  # method name reconstructed
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, 'feature_size'))
        self.assertTrue(hasattr(feat_extract, 'sampling_rate'))
        self.assertTrue(hasattr(feat_extract, 'padding_value'))
def a ( self : Optional[int] ):
__UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
__UpperCAmelCase = feat_extract.model_input_names[0]
__UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_lowercase ) == len(_lowercase ) for x, y in zip(_lowercase , processed_features[input_name] ) ) )
__UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowercase )
__UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
__UpperCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__UpperCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def a ( self : Optional[Any] ):
__UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowercase )
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
__UpperCAmelCase = feat_extract.model_input_names[0]
__UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
__UpperCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__UpperCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def a ( self : int ):
__UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowercase )
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
__UpperCAmelCase = feat_extract.model_input_names[0]
__UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''tf''' )
__UpperCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__UpperCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
    def _check_padding(self, _lowercase=False):  # method name grounded by the calls further below
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True
        def _inputs_are_equal(input_a, input_b):
            if len(input_a) != len(input_b):
                return False
            for input_slice_a, input_slice_b in zip(input_a, input_b):
                if not np.allclose(np.asarray(input_slice_a), np.asarray(input_slice_b), atol=1E-3):
                    return False
            return True
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
__UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowercase )
__UpperCAmelCase = feat_extract.model_input_names[0]
__UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
__UpperCAmelCase = self.feat_extract_tester.seq_length_diff
__UpperCAmelCase = self.feat_extract_tester.max_seq_length + pad_diff
__UpperCAmelCase = self.feat_extract_tester.min_seq_length
__UpperCAmelCase = self.feat_extract_tester.batch_size
__UpperCAmelCase = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
__UpperCAmelCase = feat_extract.pad(_lowercase , padding=_lowercase )
__UpperCAmelCase = input_a[input_name]
__UpperCAmelCase = feat_extract.pad(_lowercase , padding='''longest''' )
__UpperCAmelCase = input_a[input_name]
__UpperCAmelCase = feat_extract.pad(_lowercase , padding='''max_length''' , max_length=len(speech_inputs[-1] ) )
__UpperCAmelCase = input_a[input_name]
__UpperCAmelCase = feat_extract.pad(_lowercase , padding='''longest''' , return_tensors='''np''' )
__UpperCAmelCase = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_lowercase ):
feat_extract.pad(_lowercase , padding='''max_length''' )[input_name]
__UpperCAmelCase = feat_extract.pad(
_lowercase , padding='''max_length''' , max_length=_lowercase , return_tensors='''np''' )
__UpperCAmelCase = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_lowercase ) )
self.assertTrue(_inputs_have_equal_length(_lowercase ) )
self.assertTrue(_inputs_have_equal_length(_lowercase ) )
self.assertTrue(_inputs_are_equal(_lowercase , _lowercase ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
__UpperCAmelCase = feat_extract.pad(_lowercase , pad_to_multiple_of=10 )
__UpperCAmelCase = input_a[input_name]
__UpperCAmelCase = feat_extract.pad(_lowercase , padding='''longest''' , pad_to_multiple_of=10 )
__UpperCAmelCase = input_a[input_name]
__UpperCAmelCase = feat_extract.pad(
_lowercase , padding='''max_length''' , pad_to_multiple_of=10 , max_length=_lowercase )
__UpperCAmelCase = input_a[input_name]
__UpperCAmelCase = feat_extract.pad(
_lowercase , padding='''max_length''' , pad_to_multiple_of=10 , max_length=_lowercase , return_tensors='''np''' , )
__UpperCAmelCase = input_a[input_name]
self.assertTrue(all(len(_lowercase ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_lowercase , _lowercase ) )
__UpperCAmelCase = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_lowercase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
__UpperCAmelCase = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
    def _check_truncation(self, _lowercase=False):  # method name grounded by the calls further below
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True
        def _inputs_are_equal(input_a, input_b):
            if len(input_a) != len(input_b):
                return False
            for input_slice_a, input_slice_b in zip(input_a, input_b):
                if not np.allclose(np.asarray(input_slice_a), np.asarray(input_slice_b), atol=1E-3):
                    return False
            return True
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
__UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowercase )
__UpperCAmelCase = feat_extract.model_input_names[0]
__UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
__UpperCAmelCase = feat_extract.pad(
_lowercase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , truncation=_lowercase )
__UpperCAmelCase = input_a[input_name]
__UpperCAmelCase = feat_extract.pad(_lowercase , padding='''max_length''' , max_length=len(speech_inputs[0] ) )
__UpperCAmelCase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_lowercase ) )
self.assertFalse(_inputs_have_equal_length(_lowercase ) )
# truncate to smallest with np
__UpperCAmelCase = feat_extract.pad(
_lowercase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' , truncation=_lowercase , )
__UpperCAmelCase = input_a[input_name]
__UpperCAmelCase = feat_extract.pad(
_lowercase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' )
__UpperCAmelCase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_lowercase ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_lowercase ) )
# truncate to middle
__UpperCAmelCase = feat_extract.pad(
_lowercase , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=_lowercase , return_tensors='''np''' , )
__UpperCAmelCase = input_a[input_name]
__UpperCAmelCase = feat_extract.pad(
_lowercase , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=_lowercase )
__UpperCAmelCase = input_a[input_name]
__UpperCAmelCase = feat_extract.pad(
_lowercase , padding='''max_length''' , max_length=len(speech_inputs[1] ) , return_tensors='''np''' )
__UpperCAmelCase = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(_lowercase ) )
self.assertTrue(_inputs_have_equal_length(_lowercase ) )
self.assertTrue(_inputs_are_equal(_lowercase , _lowercase ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_lowercase ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowercase ):
feat_extract.pad(_lowercase , truncation=_lowercase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowercase ):
feat_extract.pad(_lowercase , padding='''longest''' , truncation=_lowercase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowercase ):
feat_extract.pad(_lowercase , padding='''longest''' , truncation=_lowercase )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_lowercase ):
feat_extract.pad(_lowercase , padding='''max_length''' , truncation=_lowercase )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
__UpperCAmelCase = 12
__UpperCAmelCase = feat_extract.pad(
_lowercase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowercase , truncation=_lowercase , )
__UpperCAmelCase = input_a[input_name]
__UpperCAmelCase = feat_extract.pad(
_lowercase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowercase , )
__UpperCAmelCase = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
__UpperCAmelCase = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
__UpperCAmelCase = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(_lowercase ) )
self.assertFalse(_inputs_have_equal_length(_lowercase ) )
    def test_padding_from_list(self):
        self._check_padding(numpify=False)
    def test_padding_from_array(self):
        self._check_padding(numpify=True)
    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)
    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
@require_torch
def a ( self : Tuple ):
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
__UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
__UpperCAmelCase = feat_extract.model_input_names[0]
__UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
__UpperCAmelCase = feat_extract.pad(_lowercase , padding='''longest''' , return_tensors='''np''' )[input_name]
__UpperCAmelCase = feat_extract.pad(_lowercase , padding='''longest''' , return_tensors='''pt''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
@require_tf
def a ( self : str ):
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
__UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
__UpperCAmelCase = feat_extract.model_input_names[0]
__UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
__UpperCAmelCase = feat_extract.pad(_lowercase , padding='''longest''' , return_tensors='''np''' )[input_name]
__UpperCAmelCase = feat_extract.pad(_lowercase , padding='''longest''' , return_tensors='''tf''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def a ( self : Optional[Any] ):
__UpperCAmelCase = self.feat_extract_dict
__UpperCAmelCase = True
__UpperCAmelCase = self.feature_extraction_class(**_lowercase )
__UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
__UpperCAmelCase = [len(_lowercase ) for x in speech_inputs]
__UpperCAmelCase = feat_extract.model_input_names[0]
__UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
__UpperCAmelCase = feat_extract.pad(_lowercase , padding='''longest''' , return_tensors='''np''' )
self.assertIn('''attention_mask''' , _lowercase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _lowercase )
def a ( self : Any ):
__UpperCAmelCase = self.feat_extract_dict
__UpperCAmelCase = True
__UpperCAmelCase = self.feature_extraction_class(**_lowercase )
__UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
__UpperCAmelCase = [len(_lowercase ) for x in speech_inputs]
__UpperCAmelCase = feat_extract.model_input_names[0]
__UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
__UpperCAmelCase = min(_lowercase )
__UpperCAmelCase = feat_extract.pad(
_lowercase , padding='''max_length''' , max_length=_lowercase , truncation=_lowercase , return_tensors='''np''' )
self.assertIn('''attention_mask''' , _lowercase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
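# Sketch of wiring this mixin into a concrete test case (added; names are placeholders - the
# mixin only requires `feature_extraction_class` and a `feat_extract_tester` on the subclass):
#
#   class MyFeatureExtractionTest(_UpperCAmelCase, unittest.TestCase):
#       feature_extraction_class = MyFeatureExtractor
#       def setUp(self):
#           self.feat_extract_tester = MyFeatureExtractionTester(self)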
| 49 |
"""simple docstring"""
class Things:
    '''simple docstring'''
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight
    def __repr__(self):
        return f'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
    def get_value(self):
        return self.value
    def get_name(self):  # method name assumed
        return self.name
    def get_weight(self):
        return self.weight
    def value_weight(self):  # value-to-weight ratio; method name assumed
        return self.value / self.weight
def build_menu(name, value, weight):  # function name reconstructed
    """simple docstring"""
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu
def greedy(items, max_cost, key_function):  # function name reconstructed
    """simple docstring"""
    items_copy = sorted(items, key=key_function, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def test_greedy():  # name assumed; the doctest body was not preserved in this copy
    """simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
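# Usage sketch for the greedy knapsack above (values are illustrative):
#   food = build_menu(["Burger", "Pizza", "Salad"], [80, 100, 30], [40, 60, 10])
#   taken, value = greedy(food, 60.0, Things.get_value)  # greedily pick by value under a 60.0 budget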
| 512 | 0 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:  # class name reconstructed from the transformers API this class mirrors
    """simple docstring"""
    def __init__(self, text: str = None, conversation_id=None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text
    def __eq__(self, other):
        if not isinstance(other, Conversation):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
    def add_user_input(self, text: str, overwrite: bool = False):
if self.new_user_input:
if overwrite:
logger.warning(
f"User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten "
f"with: \"{text}\".")
                self.new_user_input = text
else:
logger.warning(
f"User input added while unprocessed input was existing: \"{self.new_user_input}\" new input "
f"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input")
else:
            self.new_user_input = text
    def mark_processed(self):  # method name from the upstream transformers API
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None
    def append_response(self, response: str):  # method name from the upstream transformers API
        self.generated_responses.append(response)
    def iter_texts(self):
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self):
__SCREAMING_SNAKE_CASE = f"Conversation id: {self.uuid} \n"
for is_user, text in self.iter_texts():
__SCREAMING_SNAKE_CASE = """user""" if is_user else """bot"""
output += f"{name} >> {text} \n"
return output
@add_end_docstrings(
UpperCAmelCase__ , R'''\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ''' , )
class SCREAMING_SNAKE_CASE_ ( UpperCAmelCase__ ):
"""simple docstring"""
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__)
if self.tokenizer.pad_token_id is None:
__SCREAMING_SNAKE_CASE = self.tokenizer.eos_token
def snake_case_ ( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = {}
if min_length_for_response is not None:
__SCREAMING_SNAKE_CASE = min_length_for_response
if minimum_tokens is not None:
__SCREAMING_SNAKE_CASE = minimum_tokens
if "max_length" in generate_kwargs:
__SCREAMING_SNAKE_CASE = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__SCREAMING_SNAKE_CASE = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(lowerCAmelCase__)
return preprocess_params, forward_params, postprocess_params
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__=0 , **lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = super().__call__(lowerCAmelCase__ , num_workers=lowerCAmelCase__ , **lowerCAmelCase__)
if isinstance(lowerCAmelCase__ , lowerCAmelCase__) and len(lowerCAmelCase__) == 1:
return outputs[0]
return outputs
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=3_2):
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__):
            raise ValueError("""ConversationalPipeline expects a Conversation as input""")
if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {conversation.uuid} does not contain new user input to process. "
                """Add user inputs with the conversation's `add_user_input` method""")
if hasattr(self.tokenizer , """_build_conversation_input_ids"""):
__SCREAMING_SNAKE_CASE = self.tokenizer._build_conversation_input_ids(lowerCAmelCase__)
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__SCREAMING_SNAKE_CASE = self._legacy_parse_and_tokenize(lowerCAmelCase__)
if self.framework == "pt":
__SCREAMING_SNAKE_CASE = torch.LongTensor([input_ids])
elif self.framework == "tf":
__SCREAMING_SNAKE_CASE = tf.constant([input_ids])
return {"input_ids": input_ids, "conversation": conversation}
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=1_0 , **lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = generate_kwargs.get("""max_length""" , self.model.config.max_length)
__SCREAMING_SNAKE_CASE = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
__SCREAMING_SNAKE_CASE = max_length - minimum_tokens
__SCREAMING_SNAKE_CASE = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
__SCREAMING_SNAKE_CASE = model_inputs["""attention_mask"""][:, -trim:]
__SCREAMING_SNAKE_CASE = model_inputs.pop("""conversation""")
__SCREAMING_SNAKE_CASE = max_length
__SCREAMING_SNAKE_CASE = self.model.generate(**lowerCAmelCase__ , **lowerCAmelCase__)
if self.model.config.is_encoder_decoder:
__SCREAMING_SNAKE_CASE = 1
else:
__SCREAMING_SNAKE_CASE = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=True):
__SCREAMING_SNAKE_CASE = model_outputs["""output_ids"""]
__SCREAMING_SNAKE_CASE = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(lowerCAmelCase__)
return conversation
def snake_case_ ( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = self.tokenizer.eos_token_id
__SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__) + [eos_token_id])
else:
input_ids.extend(self.tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__))
if len(lowerCAmelCase__) > self.tokenizer.model_max_length:
__SCREAMING_SNAKE_CASE = input_ids[-self.tokenizer.model_max_length :]
return input_ids
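
# Minimal usage sketch. The two classes above correspond to `Conversation` and
# `ConversationalPipeline` in the upstream transformers API of this era; the model
# name below is only an example.
#
#   from transformers import Conversation, pipeline
#
#   chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
#   conversation = Conversation("Hi, can you recommend a movie?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])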
| 714 |
"""simple docstring"""
__magic_name__ = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and\n# uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__magic_name__ = [{"type": "code", "content": INSTALL_CONTENT}]
__magic_name__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 248 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = KandinskyVaaImgaImgPipeline
lowercase_ = ['image_embeds', 'negative_image_embeds', 'image']
lowercase_ = [
'image_embeds',
'negative_image_embeds',
'image',
]
lowercase_ = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowercase_ = False
@property
def __lowercase( self : List[Any] )-> List[Any]:
"""simple docstring"""
return 32
@property
def __lowercase( self : str )-> Any:
"""simple docstring"""
return 32
@property
def __lowercase( self : List[str] )-> List[str]:
"""simple docstring"""
return self.time_input_dim
@property
def __lowercase( self : Optional[Any] )-> str:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __lowercase( self : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
return 100
@property
def __lowercase( self : Optional[Any] )-> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : str = {
'in_channels': 4,
            # out_channels is double in_channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
SCREAMING_SNAKE_CASE__ : Dict = UNetaDConditionModel(**a_ )
return model
@property
def __lowercase( self : Any )-> int:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowercase( self : Any )-> Dict:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowercase( self : str )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.dummy_unet
SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_movq
SCREAMING_SNAKE_CASE__ : List[str] = {
'num_train_timesteps': 1000,
'beta_schedule': 'linear',
'beta_start': 0.0_0085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = DDIMScheduler(**a_ )
SCREAMING_SNAKE_CASE__ : Any = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def __lowercase( self : Dict , a_ : Any , a_ : Dict=0 )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(a_ ) ).to(a_ )
SCREAMING_SNAKE_CASE__ : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
a_ )
# create init_image
SCREAMING_SNAKE_CASE__ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(a_ ) ).to(a_ )
SCREAMING_SNAKE_CASE__ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = Image.fromarray(np.uinta(a_ ) ).convert('RGB' ).resize((256, 256) )
if str(a_ ).startswith('mps' ):
SCREAMING_SNAKE_CASE__ : int = torch.manual_seed(a_ )
else:
SCREAMING_SNAKE_CASE__ : Any = torch.Generator(device=a_ ).manual_seed(a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def __lowercase( self : Optional[Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = 'cpu'
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.pipeline_class(**a_ )
SCREAMING_SNAKE_CASE__ : Any = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
SCREAMING_SNAKE_CASE__ : Tuple = pipe(**self.get_dummy_inputs(a_ ) )
SCREAMING_SNAKE_CASE__ : str = output.images
SCREAMING_SNAKE_CASE__ : Dict = pipe(
**self.get_dummy_inputs(a_ ) , return_dict=a_ , )[0]
SCREAMING_SNAKE_CASE__ : Dict = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ : Dict = np.array(
[0.619_9778, 0.6398_4406, 0.4614_5785, 0.6294_4984, 0.562_2215, 0.4730_6132, 0.4744_1456, 0.460_7606, 0.4871_9263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def __lowercase( self : Dict )-> Any:
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase( self : Optional[int] )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_img2img_frog.npy' )
SCREAMING_SNAKE_CASE__ : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
SCREAMING_SNAKE_CASE__ : Tuple = 'A red cartoon frog, 4k'
SCREAMING_SNAKE_CASE__ : Optional[int] = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(a_ )
SCREAMING_SNAKE_CASE__ : Any = KandinskyVaaImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : Any = pipeline.to(a_ )
pipeline.set_progress_bar_config(disable=a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = pipe_prior(
a_ , generator=a_ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
SCREAMING_SNAKE_CASE__ : Any = pipeline(
image=a_ , image_embeds=a_ , negative_image_embeds=a_ , generator=a_ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='np' , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a_ , a_ )
| 85 | def solution(n: int = 600851475143) -> int:
    '''simple docstring'''
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.')
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.')
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(F"""{solution() = }""")
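    # Worked example: 13195 = 5 * 7 * 13 * 29, so its largest prime factor is 29.
    assert solution(13195) == 29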
| 85 | 1 |
'''simple docstring'''
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def _lowerCAmelCase ( __snake_case : int , __snake_case : Tuple , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : int , __snake_case : List[str] , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Union[str, Any] , ) -> Tuple:
__A : str = {
'7z': (seven_zip_file, SevenZipExtractor),
'bz2': (bza_file, BzipaExtractor),
'gzip': (gz_file, GzipExtractor),
'lz4': (lza_file, LzaExtractor),
'tar': (tar_file, TarExtractor),
'xz': (xz_file, XzExtractor),
'zip': (zip_file, ZipExtractor),
'zstd': (zstd_file, ZstdExtractor),
}
__A ,__A : Optional[Any] = input_paths_and_base_extractors[compression_format]
if input_path is None:
__A : Dict = f'for \'{compression_format}\' compression_format, '
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__snake_case )
assert base_extractor.is_extractable(__snake_case )
__A : str = tmp_path / ('extracted' if is_archive else 'extracted.txt')
base_extractor.extract(__snake_case , __snake_case )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
__A : Dict = file_path.read_text(encoding='utf-8' )
else:
__A : Dict = output_path.read_text(encoding='utf-8' )
__A : Dict = text_file.read_text(encoding='utf-8' )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def _lowerCAmelCase ( __snake_case : int , __snake_case : Tuple , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : Any , __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : Any , __snake_case : List[str] , __snake_case : List[str] , ) -> str:
__A : Dict = {
'7z': seven_zip_file,
'bz2': bza_file,
'gzip': gz_file,
'lz4': lza_file,
'tar': tar_file,
'xz': xz_file,
'zip': zip_file,
'zstd': zstd_file,
}
__A : Optional[Any] = input_paths[compression_format]
if input_path is None:
__A : List[str] = f'for \'{compression_format}\' compression_format, '
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__snake_case )
__A : Union[str, Any] = Extractor.infer_extractor_format(__snake_case )
assert extractor_format is not None
__A : Tuple = tmp_path / ('extracted' if is_archive else 'extracted.txt')
Extractor.extract(__snake_case , __snake_case , __snake_case )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
__A : Tuple = file_path.read_text(encoding='utf-8' )
else:
__A : Dict = output_path.read_text(encoding='utf-8' )
__A : Union[str, Any] = text_file.read_text(encoding='utf-8' )
assert extracted_file_content == expected_file_content
@pytest.fixture
def _lowerCAmelCase ( __snake_case : Union[str, Any] , __snake_case : str ) -> Union[str, Any]:
import tarfile
__A : List[Any] = tmp_path / 'data_dot_dot'
directory.mkdir()
__A : Tuple = directory / 'tar_file_with_dot_dot.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.join('..' , text_file.name ) )
return path
@pytest.fixture
def _lowerCAmelCase ( __snake_case : str ) -> Optional[int]:
import tarfile
__A : int = tmp_path / 'data_sym_link'
directory.mkdir()
__A : Any = directory / 'tar_file_with_sym_link.tar'
os.symlink('..' , directory / 'subdir' , target_is_directory=__snake_case )
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(str(directory / 'subdir' ) , arcname='subdir' ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
'insecure_tar_file, error_log' , [('tar_file_with_dot_dot', 'illegal path'), ('tar_file_with_sym_link', 'Symlink')] , )
def _lowerCAmelCase ( __snake_case : int , __snake_case : Dict , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Tuple ) -> int:
__A : Optional[int] = {
'tar_file_with_dot_dot': tar_file_with_dot_dot,
'tar_file_with_sym_link': tar_file_with_sym_link,
}
__A : Any = insecure_tar_files[insecure_tar_file]
__A : Tuple = tmp_path / 'extracted'
TarExtractor.extract(__snake_case , __snake_case )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def _lowerCAmelCase ( __snake_case : str ) -> Optional[int]:
# We should have less false positives than zipfile.is_zipfile
# We do that by checking only the magic number
__A : Any = tmpdir / 'not_a_zip_file'
# From: https://github.com/python/cpython/pull/5053
__A : Tuple = (
B'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'
B'\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'
B'DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'
B'\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'
)
with not_a_zip_file.open('wb' ) as f:
f.write(__snake_case )
assert zipfile.is_zipfile(str(__snake_case ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(__snake_case ) # but we're right | 338 |
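
# Why the two assertions above disagree: `zipfile.is_zipfile` searches for an
# end-of-central-directory record, which the PK bytes embedded in this PNG happen to
# satisfy, while the extractor only checks the leading magic number. A minimal sketch
# of such a magic-number check (illustrative; the real ZipExtractor internals may differ):
def _starts_with_zip_magic(path) -> bool:
    with open(path, "rb") as f:
        return f.read(4) == b"PK\x03\x04"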
'''simple docstring'''
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
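
# The row update above is Pascal's-triangle DP in O(r) space: after processing row i,
# c[j] holds C(i, j). Sanity check (C(10, 5) = 252); the call below prints 252 as well.
assert binomial_coefficient(n=10, r=5) == 252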
print(binomial_coefficient(n=10, r=5)) | 338 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"""configuration_beit""": ["""BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BeitConfig""", """BeitOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_beit"""] = ["""BeitFeatureExtractor"""]
    _import_structure["""image_processing_beit"""] = ["""BeitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_beit"""] = [
"""BEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BeitForImageClassification""",
"""BeitForMaskedImageModeling""",
"""BeitForSemanticSegmentation""",
"""BeitModel""",
"""BeitPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_beit"""] = [
"""FlaxBeitForImageClassification""",
"""FlaxBeitForMaskedImageModeling""",
"""FlaxBeitModel""",
"""FlaxBeitPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
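
# With the lazy pattern above, submodules are imported only on first attribute access,
# e.g. (illustrative):
#
#   import transformers
#   transformers.BeitImageProcessor  # first access loads image_processing_beit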
| 474 |
'''simple docstring'''
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """simple docstring"""
    if nums is None or not nums:
        raise ValueError('Input sequence should not be empty')
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(num, ans + num, ans)
    return ans
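
# Worked example: a subsequence need not be contiguous, so the optimum keeps every
# positive element: 1 + 4 + 2 + 1 + 4 = 12.
assert max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 12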
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
lowerCamelCase = int(input("""Enter number of elements : """).strip())
lowerCamelCase = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n]
print(max_subsequence_sum(array))
| 474 | 1 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
_lowercase : int = logging.getLogger(__name__)
_lowercase : Union[str, Any] = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
_lowercase : List[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _UpperCamelCase :
"""simple docstring"""
lowerCAmelCase = field(
default=__snake_case , metadata={
'help': (
'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
)
} , )
lowerCAmelCase = field(
default=__snake_case , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(__snake_case )} , )
lowerCAmelCase = field(
default=__snake_case , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
lowerCAmelCase = field(
default=__snake_case , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCAmelCase = field(
default=__snake_case , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCAmelCase = field(
default=__snake_case , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCAmelCase = field(
default=__snake_case , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
lowerCAmelCase = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowerCAmelCase = field(
default=__snake_case , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"""--config_overrides can't be used in combination with --config_name or --model_name_or_path""" )
@dataclass
class _UpperCamelCase :
"""simple docstring"""
lowerCAmelCase = field(
default=__snake_case , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
lowerCAmelCase = field(
default=__snake_case , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
lowerCAmelCase = field(default=__snake_case , metadata={'help': 'The input training data file (a text file).'} )
lowerCAmelCase = field(
default=__snake_case , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
lowerCAmelCase = field(
default=__snake_case , metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'} , )
lowerCAmelCase = field(
default=__snake_case , metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'} , )
lowerCAmelCase = field(
default=__snake_case , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase = field(
default=5 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
lowerCAmelCase = field(
default=__snake_case , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated. Default to the max input length of the model.'
)
} , )
lowerCAmelCase = field(
default=__snake_case , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
lowerCAmelCase = field(
default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
lowerCAmelCase = field(
default=__snake_case , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
def _UpperCAmelCase ( self ) -> Optional[int]:
if self.train_file is not None:
A = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
A = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def _lowerCAmelCase ( UpperCamelCase__: Optional[Any] , UpperCamelCase__: Optional[Any] ) -> Any:
"""simple docstring"""
with open(UpperCamelCase__ , """r""" , encoding="""utf-8""" ) as f:
A = [json.loads(UpperCamelCase__ ) for line in f.read().splitlines() if (len(UpperCamelCase__ ) > 0 and not line.isspace())]
assert len(UpperCamelCase__ ) == len(UpperCamelCase__ )
A = {c: dataset[c] for c in dataset.column_names}
A = refs
return Dataset.from_dict(UpperCamelCase__ )
def _lowerCAmelCase ( ) -> List[Any]:
"""simple docstring"""
A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A , A , A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A , A , A = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
A = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , UpperCamelCase__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
A = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'train[:{data_args.validation_split_percentage}%]' , )
A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'train[{data_args.validation_split_percentage}%:]' , )
else:
A = {}
if data_args.train_file is not None:
A = data_args.train_file
if data_args.validation_file is not None:
A = data_args.validation_file
A = data_args.train_file.split(""".""" )[-1]
if extension == "txt":
A = """text"""
A = load_dataset(UpperCamelCase__ , data_files=UpperCamelCase__ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
A = AutoConfig.from_pretrained(model_args.config_name , **UpperCamelCase__ )
elif model_args.model_name_or_path:
A = AutoConfig.from_pretrained(model_args.model_name_or_path , **UpperCamelCase__ )
else:
A = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(f'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(f'New config: {config}' )
A = {
"""cache_dir""": model_args.cache_dir,
"""use_fast""": model_args.use_fast_tokenizer,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
A = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **UpperCamelCase__ )
elif model_args.model_name_or_path:
A = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **UpperCamelCase__ )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name.""" )
if model_args.model_name_or_path:
A = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
A = AutoModelForMaskedLM.from_config(UpperCamelCase__ )
model.resize_token_embeddings(len(UpperCamelCase__ ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
A = datasets["""train"""].column_names
else:
A = datasets["""validation"""].column_names
A = """text""" if """text""" in column_names else column_names[0]
A = """max_length""" if data_args.pad_to_max_length else False
def tokenize_function(UpperCamelCase__: Union[str, Any] ):
# Remove empty lines
A = [line for line in examples["""text"""] if len(UpperCamelCase__ ) > 0 and not line.isspace()]
return tokenizer(examples["""text"""] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=data_args.max_seq_length )
A = datasets.map(
UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
    # Add the Chinese references if provided
if data_args.train_ref_file is not None:
A = add_chinese_references(tokenized_datasets["""train"""] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
A = add_chinese_references(
tokenized_datasets["""validation"""] , data_args.validation_ref_file )
    # If we have ref files, we need to prevent the trainer from removing them
A = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
A = False
# Data collator
# This one will take care of randomly masking the tokens.
A = DataCollatorForWholeWordMask(tokenizer=UpperCamelCase__ , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
A = Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None , eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
A = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
A = model_args.model_name_or_path
else:
A = None
A = trainer.train(resume_from_checkpoint=UpperCamelCase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
A = os.path.join(training_args.output_dir , """train_results.txt""" )
if trainer.is_world_process_zero():
with open(UpperCamelCase__ , """w""" ) as writer:
logger.info("""***** Train results *****""" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(f' {key} = {value}' )
writer.write(f'{key} = {value}\n' )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# Evaluation
A = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
A = trainer.evaluate()
A = math.exp(eval_output["""eval_loss"""] )
A = perplexity
A = os.path.join(training_args.output_dir , """eval_results_mlm_wwm.txt""" )
if trainer.is_world_process_zero():
with open(UpperCamelCase__ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in sorted(results.items() ):
logger.info(f' {key} = {value}' )
writer.write(f'{key} = {value}\n' )
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
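
# Example invocation (illustrative paths and model name; whole-word masking with
# Chinese reference files):
#
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file train.txt \
#       --train_ref_file train_ref.json \
#       --do_train \
#       --output_dir ./output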
| 546 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class _UpperCamelCase :
"""simple docstring"""
@property
def _UpperCAmelCase ( self ) -> Any:
return self.get_dummy_input()
@property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.' )
def _UpperCAmelCase ( self , a__=True , a__=False , a__=False , a__=False , ) -> Optional[Any]:
A = 4
A = 32
A = (32, 32)
A = torch.manual_seed(0 )
A = torch.device(a__ )
A = (batch_size, num_channels) + sizes
A = randn_tensor(a__ , generator=a__ , device=a__ )
A = {"""hidden_states""": hidden_states}
if include_temb:
A = 128
A = randn_tensor((batch_size, temb_channels) , generator=a__ , device=a__ )
if include_res_hidden_states_tuple:
A = torch.manual_seed(1 )
A = (randn_tensor(a__ , generator=a__ , device=a__ ),)
if include_encoder_hidden_states:
A = floats_tensor((batch_size, 32, 32) ).to(a__ )
if include_skip_sample:
A = randn_tensor(((batch_size, 3) + sizes) , generator=a__ , device=a__ )
return dummy_input
def _UpperCAmelCase ( self ) -> int:
A = {
"""in_channels""": 32,
"""out_channels""": 32,
"""temb_channels""": 128,
}
if self.block_type == "up":
A = 32
if self.block_type == "mid":
init_dict.pop("""out_channels""" )
A = self.dummy_input
return init_dict, inputs_dict
def _UpperCAmelCase ( self , a__ ) -> Optional[int]:
A , A = self.prepare_init_args_and_inputs_for_common()
A = self.block_class(**a__ )
unet_block.to(a__ )
unet_block.eval()
with torch.no_grad():
A = unet_block(**a__ )
if isinstance(a__ , a__ ):
A = output[0]
self.assertEqual(output.shape , self.output_shape )
A = output[0, -1, -3:, -3:]
A = torch.tensor(a__ ).to(a__ )
assert torch_all_close(output_slice.flatten() , a__ , atol=5e-3 )
@unittest.skipIf(torch_device == """mps""" , """Training is not supported in mps""" )
def _UpperCAmelCase ( self ) -> str:
A , A = self.prepare_init_args_and_inputs_for_common()
A = self.block_class(**a__ )
model.to(a__ )
model.train()
A = model(**a__ )
if isinstance(a__ , a__ ):
A = output[0]
A = torch.device(a__ )
A = randn_tensor(output.shape , device=a__ )
A = torch.nn.functional.mse_loss(a__ , a__ )
loss.backward()
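
# Hedged sketch of how a concrete test pairs with this mixin (names assumed; in the
# diffusers test suite the mixin is combined with unittest.TestCase roughly like):
#
#   class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
#       block_class = DownBlock2D
#       block_type = "down"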
| 546 | 1 |
"""simple docstring"""
def matching_min_vertex_cover(graph: dict) -> set:
    """simple docstring"""
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and then
    # remove every edge incident to from_node or to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """simple docstring"""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
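
# Sanity check on the sample graph from the comment below: every edge must have at
# least one endpoint in the cover. The matching-based heuristic is a 2-approximation
# of the minimum vertex cover; the exact set returned depends on set pop order.
_sample_graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
_cover = matching_min_vertex_cover(_sample_graph)
assert all(u in _cover or v in _cover for u, v in get_edges(_sample_graph))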
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 178 |
'''simple docstring'''
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """simple docstring"""
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("""Please enter a valid equation.""")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("""Both a & b of two equations can't be zero.""")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("""Infinite solutions. (Consistent system)""")
        else:
            raise ValueError("""No solution. (Inconsistent system)""")
    else:
        if determinant_x == determinant_y == 0:
            # Unique trivial solution x = y = 0 (consistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-trivial unique solution (consistent system)
            return (x, y)
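
# Worked example: x + 2y = 4 and 3x + 4y = 10 give D = -2, Dx = -4, Dy = -2,
# hence (x, y) = (2.0, 1.0).
assert cramers_rule_2x2([1, 2, 4], [3, 4, 10]) == (2.0, 1.0)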
| 208 | 0 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
_lowercase: Dict = logging.getLogger()
def _lowerCamelCase ( ):
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('-f' )
_lowerCAmelCase = parser.parse_args()
return args.f
def _lowerCamelCase ( snake_case ):
_lowerCAmelCase = {}
_lowerCAmelCase = os.path.join(lowercase__ , 'all_results.json' )
if os.path.exists(lowercase__ ):
with open(lowercase__ , 'r' ) as f:
_lowerCAmelCase = json.load(lowercase__ )
else:
raise ValueError(F'can\'t find {path}' )
return results
def _lowerCamelCase ( ):
_lowerCAmelCase = torch.cuda.is_available() and torch_device == 'cuda'
return is_using_cuda and is_apex_available()
_lowercase: Tuple = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowerCamelCase__ ( __snake_case ):
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Dict ):
_lowerCAmelCase = tempfile.mkdtemp()
_lowerCAmelCase = os.path.join(cls.tmpdir , 'default_config.yml' )
write_basic_config(save_location=cls.configPath )
_lowerCAmelCase = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : str ):
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
_lowerCAmelCase = self.get_auto_remove_tmp_dir()
_lowerCAmelCase = f'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
_lowerCAmelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , 'glue_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def SCREAMING_SNAKE_CASE__ ( self : int ):
_lowerCAmelCase = self.get_auto_remove_tmp_dir()
_lowerCAmelCase = f'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
_lowerCAmelCase = get_results(__UpperCamelCase )
self.assertLess(result['perplexity'] , 1_00 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , 'clm_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
_lowerCAmelCase = self.get_auto_remove_tmp_dir()
_lowerCAmelCase = f'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
_lowerCAmelCase = get_results(__UpperCamelCase )
self.assertLess(result['perplexity'] , 42 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , 'mlm_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
_lowerCAmelCase = 7 if get_gpu_count() > 1 else 2
_lowerCAmelCase = self.get_auto_remove_tmp_dir()
_lowerCAmelCase = f'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
_lowerCAmelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 )
self.assertLess(result['train_loss'] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , 'ner_no_trainer' ) ) )
@unittest.skip(reason='Fix me @muellerzr' )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
_lowerCAmelCase = self.get_auto_remove_tmp_dir()
_lowerCAmelCase = f'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
_lowerCAmelCase = get_results(__UpperCamelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['eval_f1'] , 28 )
self.assertGreaterEqual(result['eval_exact'] , 28 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , 'qa_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
_lowerCAmelCase = self.get_auto_remove_tmp_dir()
_lowerCAmelCase = f'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
_lowerCAmelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result['eval_accuracy'] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , 'swag_no_trainer' ) ) )
@slow
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
_lowerCAmelCase = self.get_auto_remove_tmp_dir()
_lowerCAmelCase = f'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
_lowerCAmelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result['eval_rouge1'] , 10 )
self.assertGreaterEqual(result['eval_rouge2'] , 2 )
self.assertGreaterEqual(result['eval_rougeL'] , 7 )
self.assertGreaterEqual(result['eval_rougeLsum'] , 7 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , 'summarization_no_trainer' ) ) )
@slow
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
_lowerCAmelCase = self.get_auto_remove_tmp_dir()
_lowerCAmelCase = f'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
_lowerCAmelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result['eval_bleu'] , 30 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , 'translation_no_trainer' ) ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
_lowerCAmelCase = logging.StreamHandler(sys.stdout )
logger.addHandler(__UpperCamelCase )
_lowerCAmelCase = self.get_auto_remove_tmp_dir()
_lowerCAmelCase = f'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split()
run_command(self._launch_args + testargs )
_lowerCAmelCase = get_results(__UpperCamelCase )
self.assertGreaterEqual(result['eval_overall_accuracy'] , 0.1_0 )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
_lowerCAmelCase = self.get_auto_remove_tmp_dir()
_lowerCAmelCase = f'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
_lowerCAmelCase = get_results(__UpperCamelCase )
        # The base model scores about 25%
self.assertGreaterEqual(result['eval_accuracy'] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , 'step_1' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCamelCase , 'image_classification_no_trainer' ) ) )
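
# Each test above shells out through `accelerate launch` using the basic config written
# in setUpClass; the equivalent manual invocation is roughly (illustrative):
#
#   accelerate launch --config_file default_config.yml \
#       examples/pytorch/text-classification/run_glue_no_trainer.py \
#       --model_name_or_path distilbert-base-uncased --output_dir out ...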
| 702 | from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
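
# Minimal sketch of a concrete subcommand built on the ABC above (hypothetical
# "hello" command; `parser` is the subparsers object the CLI passes in):
#
#   class HelloCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           hello_parser = parser.add_parser("hello")
#           hello_parser.set_defaults(func=lambda args: HelloCommand())
#
#       def run(self):
#           print("hello from the CLI")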
| 225 | 0 |
"""simple docstring"""
UpperCamelCase_ : Tuple = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
UpperCamelCase_ : Tuple = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
UpperCamelCase_ : Any = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 115 |
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
__lowerCAmelCase : Tuple = '''scheduler_config.json'''
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 1
_lowerCamelCase = 2
_lowerCamelCase = 3
_lowerCamelCase = 4
_lowerCamelCase = 5
@dataclass
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = 42
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = SCHEDULER_CONFIG_NAME
_lowerCamelCase = ['''dtype''']
_lowerCamelCase = []
_lowerCamelCase = True
@classmethod
def UpperCAmelCase__ ( cls , _lowercase = None , _lowercase = None , _lowercase=False , **_lowercase , ) -> Any:
'''simple docstring'''
snake_case_ , snake_case_ : int = cls.load_config(
pretrained_model_name_or_path=_lowercase , subfolder=_lowercase , return_unused_kwargs=_lowercase , **_lowercase , )
snake_case_ , snake_case_ : Dict = cls.from_config(_lowercase , return_unused_kwargs=_lowercase , **_lowercase )
if hasattr(_lowercase , """create_state""" ) and getattr(_lowercase , """has_state""" , _lowercase ):
snake_case_ : Any = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def UpperCAmelCase__ ( self , _lowercase , _lowercase = False , **_lowercase ) -> Optional[Any]:
'''simple docstring'''
self.save_config(save_directory=_lowercase , push_to_hub=_lowercase , **_lowercase )
@property
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
return self._get_compatibles()
@classmethod
def UpperCAmelCase__ ( cls ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = list(set([cls.__name__] + cls._compatibles ) )
snake_case_ : str = importlib.import_module(__name__.split(""".""" )[0] )
snake_case_ : Optional[int] = [
getattr(_lowercase , _lowercase ) for c in compatible_classes_str if hasattr(_lowercase , _lowercase )
]
return compatible_classes
def __lowerCAmelCase ( __UpperCamelCase : jnp.ndarray , __UpperCamelCase : Tuple[int] ):
'''simple docstring'''
assert len(__UpperCamelCase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(__UpperCamelCase ) - x.ndim) ) , __UpperCamelCase )
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Any=0.999 , __UpperCamelCase : Optional[int]=jnp.floataa ):
'''simple docstring'''
def alpha_bar(__UpperCamelCase : Optional[int] ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
snake_case_ : Optional[Any] = []
for i in range(__UpperCamelCase ):
snake_case_ : Dict = i / num_diffusion_timesteps
snake_case_ : Union[str, Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(__UpperCamelCase ) / alpha_bar(__UpperCamelCase ) , __UpperCamelCase ) )
return jnp.array(__UpperCamelCase , dtype=__UpperCamelCase )
@flax.struct.dataclass
class _lowerCAmelCase :
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
@classmethod
def UpperCAmelCase__ ( cls , _lowercase ) -> int:
'''simple docstring'''
snake_case_ : Any = scheduler.config
if config.trained_betas is not None:
snake_case_ : Optional[Any] = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
snake_case_ : int = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
snake_case_ : str = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
snake_case_ : int = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
f'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' )
snake_case_ : Optional[Any] = 1.0 - betas
snake_case_ : Any = jnp.cumprod(_lowercase , axis=0 )
return cls(
alphas=_lowercase , betas=_lowercase , alphas_cumprod=_lowercase , )
def get_sqrt_alpha_prod( state : CommonSchedulerState , original_samples : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray ):
    '''simple docstring'''
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod , original_samples.shape )
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod , original_samples.shape )
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common( state : CommonSchedulerState , original_samples : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray ):
    '''simple docstring'''
    sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , original_samples , noise , timesteps )
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common( state : CommonSchedulerState , sample : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray ):
    '''simple docstring'''
    sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , sample , noise , timesteps )
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
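# A small numeric sketch of the forward-diffusion identity that the add_noise
# helper above implements: x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps.
# The beta schedule and shapes below are illustrative only.
import jax.numpy as jnp
toy_betas = jnp.linspace(0.0001 , 0.02 , 10 )            # toy linear schedule
toy_alphas_cumprod = jnp.cumprod(1.0 - toy_betas , axis=0 )
x0 = jnp.ones((2, 3) )                                   # toy "clean" samples
eps = jnp.zeros((2, 3) )                                 # toy noise
t = jnp.array([0, 9] )                                   # per-sample timesteps
sqrt_ab = toy_alphas_cumprod[t][:, None] ** 0.5          # broadcast over features
sqrt_one_minus_ab = (1.0 - toy_alphas_cumprod[t])[:, None] ** 0.5
x_t = sqrt_ab * x0 + sqrt_one_minus_ab * eps             # noisy samples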
| 58 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : Dict=32 , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : Any=10 , UpperCAmelCase_ : int=[10, 20, 30, 40] , UpperCAmelCase_ : str=[1, 1, 2, 1] , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : str="relu" , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : Any=None , ):
"""simple docstring"""
__UpperCAmelCase : int = parent
__UpperCAmelCase : int = batch_size
__UpperCAmelCase : Tuple = image_size
__UpperCAmelCase : Optional[int] = num_channels
__UpperCAmelCase : int = embeddings_size
__UpperCAmelCase : Any = hidden_sizes
__UpperCAmelCase : Any = depths
__UpperCAmelCase : Optional[Any] = is_training
__UpperCAmelCase : int = use_labels
__UpperCAmelCase : Union[str, Any] = hidden_act
__UpperCAmelCase : str = num_labels
__UpperCAmelCase : Optional[Any] = scope
__UpperCAmelCase : Dict = len(UpperCAmelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : Optional[Any] = None
if self.use_labels:
__UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase : Tuple = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any ):
"""simple docstring"""
__UpperCAmelCase : Any = RegNetModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
__UpperCAmelCase : Optional[Any] = model(UpperCAmelCase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple ):
"""simple docstring"""
__UpperCAmelCase : Dict = self.num_labels
__UpperCAmelCase : Optional[Any] = RegNetForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
__UpperCAmelCase : List[str] = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase : Any = self.prepare_config_and_inputs()
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = config_and_inputs
__UpperCAmelCase : List[str] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( snake_case__ ,snake_case__ ,unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE = (
{'''feature-extraction''': RegNetModel, '''image-classification''': RegNetForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = RegNetModelTester(self )
__UpperCAmelCase : Any = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
        __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Dict = model_class(UpperCAmelCase_ )
__UpperCAmelCase : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Optional[Any] = [*signature.parameters.keys()]
__UpperCAmelCase : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
        __UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : int = model_class(config=UpperCAmelCase_ )
for name, module in model.named_modules():
if isinstance(UpperCAmelCase_ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
def check_hidden_states_output(UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] ):
__UpperCAmelCase : List[str] = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
with torch.no_grad():
__UpperCAmelCase : int = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
__UpperCAmelCase : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase : Tuple = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase_ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
        __UpperCAmelCase , __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Dict = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
__UpperCAmelCase : Optional[int] = layer_type
__UpperCAmelCase : List[str] = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : List[Any] = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )
@slow
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Dict = RegNetModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def prepare_img( ):
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(UpperCAmelCase_ )
__UpperCAmelCase : Optional[int] = self.default_image_processor
__UpperCAmelCase : int = prepare_img()
__UpperCAmelCase : Dict = image_processor(images=UpperCAmelCase_ , return_tensors="pt" ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
__UpperCAmelCase : Optional[Any] = model(**UpperCAmelCase_ )
# verify the logits
__UpperCAmelCase : int = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase_ )
__UpperCAmelCase : List[Any] = torch.tensor([-0.4180, -1.5051, -3.4836] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4 ) )
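# A short usage sketch mirroring the integration test above; the checkpoint name
# is assumed to be the first entry of REGNET_PRETRAINED_MODEL_ARCHIVE_LIST and
# the snippet downloads weights, so it is left commented out.
#
# from transformers import AutoImageProcessor, RegNetForImageClassification
# model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
# processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
# image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
# inputs = processor(images=image, return_tensors="pt")
# with torch.no_grad():
#     logits = model(**inputs).logits
# print(model.config.id2label[int(logits.argmax(-1))])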
| 720 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ : List[str] = logging.get_logger(__name__)
lowerCAmelCase__ : Dict = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
lowerCAmelCase__ : Tuple = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
lowerCAmelCase__ : str = {"facebook/blenderbot_small-90M": 5_12}
def get_pairs(word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
return pairs
class SCREAMING_SNAKE_CASE__ ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = ['''input_ids''', '''attention_mask''']
def __init__( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str="__start__" , UpperCAmelCase_ : List[Any]="__end__" , UpperCAmelCase_ : str="__unk__" , UpperCAmelCase_ : int="__null__" , **UpperCAmelCase_ : int , ):
"""simple docstring"""
super().__init__(unk_token=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , **UpperCAmelCase_ )
with open(UpperCAmelCase_ , encoding="utf-8" ) as vocab_handle:
__UpperCAmelCase : int = json.load(UpperCAmelCase_ )
__UpperCAmelCase : Tuple = {v: k for k, v in self.encoder.items()}
with open(UpperCAmelCase_ , encoding="utf-8" ) as merges_handle:
__UpperCAmelCase : Any = merges_handle.read().split("\n" )[1:-1]
__UpperCAmelCase : Dict = [tuple(merge.split() ) for merge in merges]
__UpperCAmelCase : str = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
__UpperCAmelCase : Dict = {}
@property
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
return len(self.encoder )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase_ : str ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
__UpperCAmelCase : List[str] = re.sub("([.,!?()])" , R" \1" , UpperCAmelCase_ )
__UpperCAmelCase : Optional[Any] = re.sub("(')" , R" \1 " , UpperCAmelCase_ )
__UpperCAmelCase : Optional[Any] = re.sub(R"\s{2,}" , " " , UpperCAmelCase_ )
if "\n" in token:
__UpperCAmelCase : List[Any] = token.replace("\n" , " __newln__" )
__UpperCAmelCase : str = token.split(" " )
__UpperCAmelCase : int = []
for token in tokens:
if not len(UpperCAmelCase_ ):
continue
__UpperCAmelCase : Any = token.lower()
__UpperCAmelCase : Optional[Any] = tuple(UpperCAmelCase_ )
__UpperCAmelCase : int = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
__UpperCAmelCase : Union[str, Any] = get_pairs(UpperCAmelCase_ )
if not pairs:
words.append(UpperCAmelCase_ )
continue
while True:
__UpperCAmelCase : Union[str, Any] = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : self.bpe_ranks.get(UpperCAmelCase_ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase , __UpperCAmelCase : int = bigram
__UpperCAmelCase : Any = []
__UpperCAmelCase : List[str] = 0
while i < len(UpperCAmelCase_ ):
try:
__UpperCAmelCase : str = word.index(UpperCAmelCase_ , UpperCAmelCase_ )
new_word.extend(word[i:j] )
__UpperCAmelCase : Dict = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(UpperCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase : Dict = tuple(UpperCAmelCase_ )
__UpperCAmelCase : Optional[Any] = new_word
if len(UpperCAmelCase_ ) == 1:
break
else:
__UpperCAmelCase : Union[str, Any] = get_pairs(UpperCAmelCase_ )
__UpperCAmelCase : int = "@@ ".join(UpperCAmelCase_ )
__UpperCAmelCase : Tuple = word[:-4]
__UpperCAmelCase : Any = word
words.append(UpperCAmelCase_ )
return " ".join(UpperCAmelCase_ )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase_ : str ):
"""simple docstring"""
__UpperCAmelCase : Tuple = []
__UpperCAmelCase : Dict = re.findall(R"\S+\n?" , UpperCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(UpperCAmelCase_ ).split(" " ) ) )
return split_tokens
def lowerCamelCase_ ( self : Dict , UpperCAmelCase_ : str ):
"""simple docstring"""
__UpperCAmelCase : str = token.lower()
return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token ) )
def lowerCamelCase_ ( self : Any , UpperCAmelCase_ : int ):
"""simple docstring"""
return self.decoder.get(UpperCAmelCase_ , self.unk_token )
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase_ : List[str] ):
"""simple docstring"""
__UpperCAmelCase : Any = " ".join(UpperCAmelCase_ ).replace("@@ " , "" ).strip()
return out_string
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
__UpperCAmelCase : List[str] = os.path.join(
UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__UpperCAmelCase : List[Any] = os.path.join(
UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_ ) + "\n" )
__UpperCAmelCase : Dict = 0
with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase_ : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!" )
__UpperCAmelCase : Union[str, Any] = token_index
writer.write(" ".join(UpperCAmelCase_ ) + "\n" )
index += 1
return vocab_file, merge_file
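# A self-contained sketch of the merge loop that the bpe() method above
# implements: repeatedly fuse the adjacent symbol pair with the lowest learned
# rank until no learned pair remains. The ranks below are illustrative.
def _bpe_sketch(word , ranks ):
    symbols = list(word )
    while len(symbols ) > 1:
        pairs = {(a, b) for a, b in zip(symbols , symbols[1:] )}
        best = min(pairs , key=lambda pair: ranks.get(pair , float("inf" ) ) )
        if best not in ranks:
            break  # no learned merge applies any more
        merged , i = [] , 0
        while i < len(symbols ):
            if i < len(symbols ) - 1 and (symbols[i], symbols[i + 1]) == best:
                merged.append(symbols[i] + symbols[i + 1] )
                i += 2
            else:
                merged.append(symbols[i] )
                i += 1
        symbols = merged
    return symbols
# _bpe_sketch("lower", {("l", "o"): 0, ("lo", "w"): 1}) -> ['low', 'e', 'r']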
| 329 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@property
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
a_ : Tuple = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
a_ : Optional[Any] = self.dummy_uncond_unet
a_ : Optional[int] = PNDMScheduler()
a_ : Dict = PNDMPipeline(unet=lowercase__ , scheduler=lowercase__ )
pndm.to(lowercase__ )
pndm.set_progress_bar_config(disable=lowercase__ )
a_ : int = torch.manual_seed(0 )
a_ : Optional[Any] = pndm(generator=lowercase__ , num_inference_steps=20 , output_type="""numpy""" ).images
a_ : List[str] = torch.manual_seed(0 )
a_ : Tuple = pndm(generator=lowercase__ , num_inference_steps=20 , output_type="""numpy""" , return_dict=lowercase__ )[0]
a_ : Optional[int] = image[0, -3:, -3:, -1]
a_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a_ : Dict = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
a_ : Tuple = """google/ddpm-cifar10-32"""
a_ : str = UNetaDModel.from_pretrained(lowercase__ )
a_ : str = PNDMScheduler()
a_ : Tuple = PNDMPipeline(unet=lowercase__ , scheduler=lowercase__ )
pndm.to(lowercase__ )
pndm.set_progress_bar_config(disable=lowercase__ )
a_ : int = torch.manual_seed(0 )
a_ : List[Any] = pndm(generator=lowercase__ , output_type="""numpy""" ).images
a_ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a_ : Optional[Any] = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
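# A minimal usage sketch of the pipeline exercised above: unconditional PNDM
# sampling with a pretrained DDPM UNet. It downloads weights, so it is left
# commented out.
#
# import torch
# from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
# unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
# pipe = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
# image = pipe(generator=torch.manual_seed(0), num_inference_steps=20,
#              output_type="numpy").images[0]  # (32, 32, 3) array in [0, 1]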
| 442 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
lowerCAmelCase_ : int = {'target_lang': 'fi', 'source_lang': 'en'}
lowerCAmelCase_ : str = '>>zh<<'
lowerCAmelCase_ : List[str] = 'Helsinki-NLP/'
if is_torch_available():
lowerCAmelCase_ : Dict = 'pt'
elif is_tf_available():
lowerCAmelCase_ : Union[str, Any] = 'tf'
else:
lowerCAmelCase_ : int = 'jax'
@require_sentencepiece
class SCREAMING_SNAKE_CASE ( snake_case_ , unittest.TestCase ):
__magic_name__ : Dict = MarianTokenizer
__magic_name__ : Any = False
__magic_name__ : str = True
def lowercase_ ( self : Any ):
'''simple docstring'''
super().setUp()
a_ : Optional[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
a_ : Optional[int] = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
a_ : List[str] = Path(self.tmpdirname )
save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
a_ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ ( self : Tuple , **lowercase__ : int ):
'''simple docstring'''
return MarianTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )
def lowercase_ ( self : int , lowercase__ : int ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def lowercase_ ( self : List[str] ):
'''simple docstring'''
a_ : Optional[int] = """</s>"""
a_ : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
a_ : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(lowercase__ ) , 9 )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def lowercase_ ( self : str ):
'''simple docstring'''
a_ : str = MarianTokenizer.from_pretrained(F"{ORG_NAME}opus-mt-en-de" )
a_ : Any = en_de_tokenizer(["""I am a small frog"""] , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
a_ : str = [38, 121, 14, 697, 3_8848, 0]
self.assertListEqual(lowercase__ , batch.input_ids[0] )
a_ : Union[str, Any] = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(lowercase__ )
a_ : Union[str, Any] = [x.name for x in Path(lowercase__ ).glob("""*""" )]
self.assertIn("""source.spm""" , lowercase__ )
MarianTokenizer.from_pretrained(lowercase__ )
def lowercase_ ( self : str ):
'''simple docstring'''
a_ : int = self.get_tokenizer()
a_ : Dict = tok(
["""I am a small frog""" * 1000, """I am a small frog"""] , padding=lowercase__ , truncation=lowercase__ , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
a_ : List[str] = self.get_tokenizer()
a_ : Dict = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=lowercase__ , return_tensors=lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
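        # fmt: off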
a_ : Optional[int] = {"""input_ids""": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def lowercase_ ( self : int ):
'''simple docstring'''
a_ : Tuple = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
a_ : Tuple = """Tämä on testi"""
a_ : Union[str, Any] = """This is a test"""
a_ : Union[str, Any] = [76, 7, 2047, 2]
a_ : Optional[int] = [69, 12, 11, 940, 2]
a_ : Optional[Any] = tokenizer(lowercase__ ).input_ids
self.assertListEqual(lowercase__ , lowercase__ )
a_ : Optional[int] = tokenizer(text_target=lowercase__ ).input_ids
self.assertListEqual(lowercase__ , lowercase__ )
a_ : str = tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
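# A short sketch of the two-vocab behaviour tested above: source text is
# encoded with the source vocabulary, and text_target=... switches to the
# target vocabulary. Downloads a tiny test checkpoint, so it is left commented.
#
# from transformers import MarianTokenizer
# tok = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")
# src_ids = tok("Tämä on testi").input_ids               # source-language ids
# tgt_ids = tok(text_target="This is a test").input_ids  # target-language ids
# print(tok.decode(tgt_ids, skip_special_tokens=True))   # "This is a test"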
| 442 | 1 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
UpperCAmelCase__ : Dict = logging.get_logger(__name__)
class __lowercase ( lowerCamelCase__ ):
def __init__( self , *lowercase_ , **lowercase_) -> None:
warnings.warn(
'The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use VideoMAEImageProcessor instead.' , FutureWarning , )
super().__init__(*lowercase_ , **lowercase_)
| 711 |
import numpy as np
def sigmoid(vector : np.ndarray ) -> np.ndarray:
    '''simple docstring'''
    return 1 / (1 + np.exp(-vector ))
def sigmoid_linear_unit(vector : np.ndarray ) -> np.ndarray:
    '''simple docstring'''
    return vector * sigmoid(vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
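# Quick numeric check of the two activations above: sigmoid(0) = 0.5, and the
# sigmoid linear unit (SiLU/swish) is x * sigmoid(x), so it vanishes at 0.
# sigmoid(np.array([-1.0, 0.0, 1.0]))             -> ~[0.2689, 0.5, 0.7311]
# sigmoid_linear_unit(np.array([-1.0, 0.0, 1.0])) -> ~[-0.2689, 0.0, 0.7311]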
| 676 | 0 |
'''simple docstring'''
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits : int = 3 )-> qiskit.result.counts.Counts:
    '''simple docstring'''
    if isinstance(number_of_qubits , str ):
        raise TypeError('''number of qubits must be an integer.''' )
    if number_of_qubits <= 0:
        raise ValueError('''number of qubits must be > 0.''' )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError('''number of qubits must be an exact integer.''' )
    if number_of_qubits > 10:
        raise ValueError('''number of qubits too large to simulate (>10).''' )
    qr = QuantumRegister(number_of_qubits , '''qr''' )
    cr = ClassicalRegister(number_of_qubits , '''cr''' )
    quantum_circuit = QuantumCircuit(qr , cr )
    counter = number_of_qubits
    for i in range(number_of_qubits ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k , number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(qr , cr )
    # simulate with 10000 shots
    backend = Aer.get_backend('''qasm_simulator''' )
    job = execute(quantum_circuit , backend , shots=10_000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(
F"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
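# A classical cross-check of what the circuit above implements: the QFT on n
# qubits is the unitary F[j, k] = exp(2*pi*i*j*k / 2**n) / sqrt(2**n). The
# numpy sketch below builds that matrix directly and verifies it is unitary.
def _qft_matrix_check(n: int = 3 ) -> None:
    dim = 2**n
    j, k = np.meshgrid(np.arange(dim ) , np.arange(dim ) , indexing='''ij''' )
    f_matrix = np.exp(2j * np.pi * j * k / dim ) / np.sqrt(dim )
    assert np.allclose(f_matrix @ f_matrix.conj().T , np.eye(dim ) )  # unitary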
| 24 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowerCAmelCase ( unittest.TestCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=4 , ) -> Any:
'''simple docstring'''
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_attention_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_choices
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = None
if self.use_attention_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : Tuple = True
__lowercase : Optional[int] = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = FlaxRoFormerModelTester(self )
@slow
def lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__snake_case = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' , from_pt=__SCREAMING_SNAKE_CASE )
__snake_case = model(np.ones((1, 1) ) )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_flax
class lowerCAmelCase ( unittest.TestCase):
@slow
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
__snake_case = jnp.array([[0, 1, 2, 3, 4, 5]] )
__snake_case = model(__SCREAMING_SNAKE_CASE )[0]
__snake_case = 5_0000
__snake_case = (1, 6, vocab_size)
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
__snake_case = jnp.array(
[[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 24 | 1 |
"""simple docstring"""
def decimal_isolate(number , digit_amount ):
    if digit_amount > 0:
        return round(number - int(number ) , digit_amount )
    return number - int(number )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 711 |
"""simple docstring"""
from statistics import mean, stdev
def normalization( data : list , ndigits : int = 3 ) -> list:
    x_min = min(data )
    x_max = max(data )
    # normalize data
    return [round((x - x_min) / (x_max - x_min) , ndigits ) for x in data]
def standardization( data : list , ndigits : int = 3 ) -> list:
    mu = mean(data )
    sigma = stdev(data )
    # standardize data
    return [round((x - mu) / (sigma) , ndigits ) for x in data]
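# Worked example for the two rescalings above: min-max maps values into [0, 1],
# while z-score standardization centres them at mean 0 with sample standard
# deviation 1 (statistics.stdev uses the n - 1 denominator).
# normalization([2.0, 4.0, 6.0, 8.0])   -> [0.0, 0.333, 0.667, 1.0]
# standardization([2.0, 4.0, 6.0, 8.0]) -> [-1.162, -0.387, 0.387, 1.162]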
| 485 | 0 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
print('''Loading config file...''' )
def flatten_yaml_as_dict(lowercase_ , lowercase_="" , lowercase_="." ):
A__ = []
for k, v in d.items():
A__ = parent_key + sep + k if parent_key else k
if isinstance(lowercase_ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(lowercase_ , lowercase_ , sep=lowercase_ ).items() )
else:
items.append((new_key, v) )
return dict(lowercase_ )
A__ = argparse.Namespace()
with open(lowercase_ , '''r''' ) as yaml_file:
try:
A__ = yaml.load(lowercase_ , Loader=yaml.FullLoader )
A__ = flatten_yaml_as_dict(lowercase_ )
for k, v in flat_cfg.items():
setattr(lowercase_ , lowercase_ , lowercase_ )
except yaml.YAMLError as exc:
logger.error('''Error while loading config file: {}. Error message: {}'''.format(lowercase_ , str(lowercase_ ) ) )
return config
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[Any]:
"""simple docstring"""
A__ = MobileViTVaConfig()
A__ = False
# dataset
if task_name.startswith('''imagenet1k_''' ):
A__ = 1_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
A__ = 384
else:
A__ = 256
A__ = '''imagenet-1k-id2label.json'''
elif task_name.startswith('''imagenet21k_to_1k_''' ):
A__ = 21_000
if int(task_name.strip().split('''_''' )[-1] ) == 384:
A__ = 384
else:
A__ = 256
A__ = '''imagenet-22k-id2label.json'''
elif task_name.startswith('''ade20k_''' ):
A__ = 151
A__ = 512
A__ = '''ade20k-id2label.json'''
A__ = True
elif task_name.startswith('''voc_''' ):
A__ = 21
A__ = 512
A__ = '''pascal-voc-id2label.json'''
A__ = True
# orig_config
A__ = load_orig_config_file(lowercase_ )
assert getattr(lowercase_ , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model"
A__ = getattr(lowercase_ , '''model.classification.mitv2.width_multiplier''' , 1.0 )
assert (
getattr(lowercase_ , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
A__ = getattr(lowercase_ , '''model.classification.activation.name''' , '''swish''' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
A__ = getattr(lowercase_ , '''model.segmentation.output_stride''' , 16 )
if "_deeplabv3" in task_name:
A__ = getattr(lowercase_ , '''model.segmentation.deeplabv3.aspp_rates''' , [12, 24, 36] )
A__ = getattr(lowercase_ , '''model.segmentation.deeplabv3.aspp_out_channels''' , 512 )
A__ = getattr(lowercase_ , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 )
# id2label
A__ = '''huggingface/label-files'''
A__ = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type='''dataset''' ) , '''r''' ) )
A__ = {int(lowercase_ ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
return config
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
A__ = dct.pop(lowercase_ )
A__ = val
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=False ) -> Union[str, Any]:
"""simple docstring"""
if base_model:
A__ = ''''''
else:
A__ = '''mobilevitv2.'''
A__ = []
for k in state_dict.keys():
if k[:8] == "encoder.":
A__ = k[8:]
else:
A__ = k
if ".block." in k:
A__ = k_new.replace('''.block.''' , '''.''' )
if ".conv." in k:
A__ = k_new.replace('''.conv.''' , '''.convolution.''' )
if ".norm." in k:
A__ = k_new.replace('''.norm.''' , '''.normalization.''' )
if "conv_1." in k:
A__ = k_new.replace('''conv_1.''' , f"""{model_prefix}conv_stem.""" )
for i in [1, 2]:
if f"""layer_{i}.""" in k:
A__ = k_new.replace(f"""layer_{i}.""" , f"""{model_prefix}encoder.layer.{i-1}.layer.""" )
if ".exp_1x1." in k:
A__ = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' )
if ".red_1x1." in k:
A__ = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' )
for i in [3, 4, 5]:
if f"""layer_{i}.0.""" in k:
A__ = k_new.replace(f"""layer_{i}.0.""" , f"""{model_prefix}encoder.layer.{i-1}.downsampling_layer.""" )
if f"""layer_{i}.1.local_rep.0.""" in k:
A__ = k_new.replace(f"""layer_{i}.1.local_rep.0.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_kxk.""" )
if f"""layer_{i}.1.local_rep.1.""" in k:
A__ = k_new.replace(f"""layer_{i}.1.local_rep.1.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_1x1.""" )
for i in [3, 4, 5]:
if i == 3:
A__ = [0, 1]
elif i == 4:
A__ = [0, 1, 2, 3]
elif i == 5:
A__ = [0, 1, 2]
for j in j_in:
if f"""layer_{i}.1.global_rep.{j}.""" in k:
A__ = k_new.replace(
f"""layer_{i}.1.global_rep.{j}.""" , f"""{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.""" )
if f"""layer_{i}.1.global_rep.{j+1}.""" in k:
A__ = k_new.replace(
f"""layer_{i}.1.global_rep.{j+1}.""" , f"""{model_prefix}encoder.layer.{i-1}.layernorm.""" )
if f"""layer_{i}.1.conv_proj.""" in k:
A__ = k_new.replace(f"""layer_{i}.1.conv_proj.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_projection.""" )
if "pre_norm_attn.0." in k:
A__ = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' )
if "pre_norm_attn.1." in k:
A__ = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' )
if "pre_norm_ffn.0." in k:
A__ = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' )
if "pre_norm_ffn.1." in k:
A__ = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' )
if "pre_norm_ffn.3." in k:
A__ = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' )
if "classifier.1." in k:
A__ = k_new.replace('''classifier.1.''' , '''classifier.''' )
if "seg_head." in k:
A__ = k_new.replace('''seg_head.''' , '''segmentation_head.''' )
if ".aspp_layer." in k:
A__ = k_new.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in k:
A__ = k_new.replace('''.aspp_pool.''' , '''.''' )
rename_keys.append((k, k_new) )
return rename_keys
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[Any]:
"""simple docstring"""
A__ = []
for k in state_dict.keys():
if k.startswith('''seg_head.aux_head.''' ):
keys_to_ignore.append(lowercase_ )
for k in keys_to_ignore:
state_dict.pop(lowercase_ , lowercase_ )
def prepare_img( ) -> Union[str, Any]:
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Tuple:
"""simple docstring"""
A__ = get_mobilevitva_config(lowercase_ , lowercase_ )
# load original state_dict
A__ = torch.load(lowercase_ , map_location='''cpu''' )
# load huggingface model
if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ):
A__ = MobileViTVaForSemanticSegmentation(lowercase_ ).eval()
A__ = False
else:
A__ = MobileViTVaForImageClassification(lowercase_ ).eval()
A__ = False
# remove and rename some keys of load the original model
A__ = checkpoint
remove_unused_keys(lowercase_ )
A__ = create_rename_keys(lowercase_ , base_model=lowercase_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(lowercase_ , lowercase_ , lowercase_ )
# load modified state_dict
model.load_state_dict(lowercase_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
A__ = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
A__ = image_processor(images=prepare_img() , return_tensors='''pt''' )
A__ = model(**lowercase_ )
# verify classification model
if task_name.startswith('''imagenet''' ):
A__ = outputs.logits
A__ = logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
A__ = torch.tensor([-1.6_3_3_6E0_0, -7.3_2_0_4E-0_2, -5.1_8_8_3E-0_1] )
assert torch.allclose(logits[0, :3] , lowercase_ , atol=1E-4 )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(f"""Saving model {task_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowercase_ )
if __name__ == "__main__":
_lowerCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""",
default="""imagenet1k_256""",
type=str,
help=(
"""Name of the task for which the MobileViTV2 model you'd like to convert is trained on . """
"""
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
"""
),
choices=[
"""imagenet1k_256""",
"""imagenet1k_384""",
"""imagenet21k_to_1k_256""",
"""imagenet21k_to_1k_384""",
"""ade20k_deeplabv3""",
"""voc_deeplabv3""",
],
)
parser.add_argument(
"""--orig_checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument("""--orig_config_path""", required=True, type=str, help="""Path to the original config file.""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
_lowerCamelCase : int = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
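# A minimal sketch of the rename machinery used above: build (old, new) key
# pairs, then pop each tensor out of the state dict and re-insert it under the
# new name. The keys below are illustrative, not the real checkpoint layout.
def _rename_sketch():
    state_dict = {"conv_1.conv.weight": 0}
    renames = [("conv_1.conv.weight", "mobilevitv2.conv_stem.convolution.weight")]
    for old_key, new_key in renames:
        state_dict[new_key] = state_dict.pop(old_key)
    return list(state_dict)  # ['mobilevitv2.conv_stem.convolution.weight']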
| 87 | """simple docstring"""
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )-> List[Any]:
_SCREAMING_SNAKE_CASE : Optional[int] = {
"""7z""": (seven_zip_file, SevenZipExtractor),
"""bz2""": (bza_file, BzipaExtractor),
"""gzip""": (gz_file, GzipExtractor),
"""lz4""": (lza_file, LzaExtractor),
"""tar""": (tar_file, TarExtractor),
"""xz""": (xz_file, XzExtractor),
"""zip""": (zip_file, ZipExtractor),
"""zstd""": (zstd_file, ZstdExtractor),
}
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = input_paths_and_base_extractors[compression_format]
if input_path is None:
_SCREAMING_SNAKE_CASE : List[Any] = F"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__SCREAMING_SNAKE_CASE )
assert base_extractor.is_extractable(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Union[str, Any] = tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
base_extractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
_SCREAMING_SNAKE_CASE : Optional[int] = file_path.read_text(encoding="""utf-8""" )
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = output_path.read_text(encoding="""utf-8""" )
_SCREAMING_SNAKE_CASE : List[str] = text_file.read_text(encoding="""utf-8""" )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )-> List[str]:
_SCREAMING_SNAKE_CASE : Optional[int] = {
"""7z""": seven_zip_file,
"""bz2""": bza_file,
"""gzip""": gz_file,
"""lz4""": lza_file,
"""tar""": tar_file,
"""xz""": xz_file,
"""zip""": zip_file,
"""zstd""": zstd_file,
}
_SCREAMING_SNAKE_CASE : Tuple = input_paths[compression_format]
if input_path is None:
_SCREAMING_SNAKE_CASE : List[Any] = F"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Optional[int] = Extractor.infer_extractor_format(__SCREAMING_SNAKE_CASE )
assert extractor_format is not None
_SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
Extractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
_SCREAMING_SNAKE_CASE : Tuple = file_path.read_text(encoding="""utf-8""" )
else:
_SCREAMING_SNAKE_CASE : str = output_path.read_text(encoding="""utf-8""" )
_SCREAMING_SNAKE_CASE : Optional[Any] = text_file.read_text(encoding="""utf-8""" )
assert extracted_file_content == expected_file_content
@pytest.fixture
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any:
import tarfile
_SCREAMING_SNAKE_CASE : Any = tmp_path / """data_dot_dot"""
directory.mkdir()
_SCREAMING_SNAKE_CASE : Optional[int] = directory / """tar_file_with_dot_dot.tar"""
with tarfile.TarFile(__SCREAMING_SNAKE_CASE , """w""" ) as f:
f.add(__SCREAMING_SNAKE_CASE , arcname=os.path.join("""..""" , text_file.name ) )
return path
@pytest.fixture
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Dict:
import tarfile
_SCREAMING_SNAKE_CASE : List[str] = tmp_path / """data_sym_link"""
directory.mkdir()
_SCREAMING_SNAKE_CASE : Optional[int] = directory / """tar_file_with_sym_link.tar"""
os.symlink("""..""" , directory / """subdir""" , target_is_directory=__SCREAMING_SNAKE_CASE )
with tarfile.TarFile(__SCREAMING_SNAKE_CASE , """w""" ) as f:
f.add(str(directory / """subdir""" ) , arcname="""subdir""" ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
"""insecure_tar_file, error_log""" , [("""tar_file_with_dot_dot""", """illegal path"""), ("""tar_file_with_sym_link""", """Symlink""")] , )
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple:
_SCREAMING_SNAKE_CASE : List[Any] = {
"""tar_file_with_dot_dot""": tar_file_with_dot_dot,
"""tar_file_with_sym_link""": tar_file_with_sym_link,
}
_SCREAMING_SNAKE_CASE : int = insecure_tar_files[insecure_tar_file]
_SCREAMING_SNAKE_CASE : str = tmp_path / """extracted"""
TarExtractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str:
# We should have less false positives than zipfile.is_zipfile
# We do that by checking only the magic number
_SCREAMING_SNAKE_CASE : List[str] = tmpdir / """not_a_zip_file"""
# From: https://github.com/python/cpython/pull/5053
_SCREAMING_SNAKE_CASE : Any = (
b"""\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"""
b"""\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"""
b"""DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"""
b"""\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"""
)
with not_a_zip_file.open("""wb""" ) as f:
f.write(__SCREAMING_SNAKE_CASE )
assert zipfile.is_zipfile(str(__SCREAMING_SNAKE_CASE ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(__SCREAMING_SNAKE_CASE ) # but we're right
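# A short usage sketch of the Extractor facade these tests exercise: the format
# is inferred from the file's magic number, then dispatched to the matching
# extractor. The path is illustrative.
#
# from datasets.utils.extract import Extractor
# fmt = Extractor.infer_extractor_format("archive.tar.gz")  # e.g. "gzip"
# if fmt is not None:
#     Extractor.extract("archive.tar.gz", "extracted", fmt)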
| 338 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase : Any = {
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[Any] = [
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
UpperCAmelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
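# A stripped-down sketch of the _LazyModule pattern used above: attribute access
# triggers the real submodule import, so importing the package stays cheap until
# a symbol is actually needed. Simplified, not the real implementation.
import importlib
import types
class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }
    def __getattr__(self, attr):
        module_name = self._symbol_to_module.get(attr)
        if module_name is None:
            raise AttributeError(attr)
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        return getattr(module, attr)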
| 121 | """simple docstring"""
from __future__ import annotations
import math
def minimax( depth : int , node_index : int , is_max : bool , scores : list[int] , height : float ) -> int:
    """simple docstring"""
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''' )
    if len(scores ) == 0:
        raise ValueError('''Scores cannot be empty''' )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , not is_max , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , not is_max , scores , height ) , )
    return min(
        minimax(depth + 1 , node_index * 2 , not is_max , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , not is_max , scores , height ) , )
def main( ) -> None:
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print('''Optimal value : ''' , end='''''' )
    print(minimax(0 , 0 , True , scores , height ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 121 | 1 |
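# Editor's note: a worked check of the snippet above (assumes the fixed
# `minimax` is in scope). With a maximizing root and height log2(8) = 3:
#   depth 2 (max): max(90, 23)=90   max(6, 33)=33   max(21, 65)=65   max(123, 34423)=34423
#   depth 1 (min): min(90, 33)=33   min(65, 34423)=65
#   depth 0 (max): max(33, 65)=65
assert minimax(0, 0, True, [90, 23, 6, 33, 21, 65, 123, 34423], 3) == 65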
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 556 |
def heaps(arr: list) -> list:
    """Return all permutations of `arr` using Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
| 556 | 1 |
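# Editor's note: quick usage check for the fixed `heaps` above. Heap's
# algorithm emits each of the n! orderings exactly once, with successive
# permutations differing by a single swap.
perms = heaps([1, 2, 3])
assert len(perms) == 6 and len(set(perms)) == 6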
import flax.linen as nn
import jax.numpy as jnp

from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    """Cross-attention 2D downsampling block: resnet/transformer pairs plus an optional downsample."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    """Plain 2D downsampling block: a stack of resnets with an optional downsample."""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    """Cross-attention 2D upsampling block: consumes encoder skip connections, optional upsample."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    """Plain 2D upsampling block: resnets over concatenated skip connections."""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    """UNet mid block: an initial resnet followed by alternating transformer/resnet pairs."""

    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
| 410 |
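# Editor's note: why the up blocks above concatenate along axis=-1. Flax keeps
# image tensors in NHWC layout, so the UNet skip connection joins encoder and
# decoder activations on the trailing channel axis (shapes are illustrative).
import jax.numpy as jnp

decoder_h = jnp.zeros((1, 8, 8, 320))
encoder_skip = jnp.zeros((1, 8, 8, 640))
assert jnp.concatenate((decoder_h, encoder_skip), axis=-1).shape == (1, 8, 8, 960)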
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if a queen can be placed at board[row][column] without being attacked."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, backtracking whenever a placement is unsafe."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 410 | 1 |
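# Editor's note: a check one can run after the fixed snippet above executes.
# The 8x8 board has exactly 92 solutions; note that `solution` stores aliases
# of the single mutable `board`, so only the count (not the contents) is
# meaningful once the search has finished.
assert len(solution) == 92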
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
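# Editor's note: a minimal standalone use of the (since deprecated) benchmark
# utilities exercised by the tests above, with the same tiny model; this is a
# sketch, not part of the test suite.
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"], inference=True, training=False,
    sequence_lengths=[8], batch_sizes=[1], multi_process=False,
)
print(PyTorchBenchmark(args).run())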
| 85 |
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 85 | 1 |
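# Editor's note: the padding arithmetic above grows each spatial side to the
# next multiple of `pad_size` (a full extra block is added when the side is
# already a multiple). For a 17x30 image with pad_size=8:
pad_size, height, width = 8, 17, 30
pad_h = (height // pad_size + 1) * pad_size - height  # 7
pad_w = (width // pad_size + 1) * pad_size - width    # 2
assert (height + pad_h, width + pad_w) == (24, 32)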
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        self.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class LayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 700 |
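# Editor's note: the integration test above feeds `bbox` directly; in real use
# LayoutLMv3 expects word boxes as (x0, y0, x1, y1) normalized to a 0-1000
# grid. A common helper for pixel-space boxes (ours, not from the test file):
def normalize_box(box, width, height):
    return [
        int(1000 * box[0] / width),
        int(1000 * box[1] / height),
        int(1000 * box[2] / width),
        int(1000 * box[3] / height),
    ]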
def min_path_sum(grid: list) -> int:
    """Return the minimum cost of a top-left to bottom-right path moving only right or down."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]

    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 224 | 0 |
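# Editor's note: tracing the fixed `min_path_sum` above on the classic 3x3 grid.
#   [[1, 3, 1],    row 0 prefix sums -> [1, 4, 5]
#    [1, 5, 1],    fill_row          -> [2, 7, 6]
#    [4, 2, 1]]    fill_row          -> [6, 8, 7]   => answer 7
assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7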
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])", number=10000, globals=globals()
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])", number=10000, globals=globals()
            )
        )

    benchmark()
| 671 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 662 | 0 |
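# Editor's note: the ConvNeXt evaluation resize above in numbers. For
# shortest_edge < 384 the short side is first scaled to shortest_edge/crop_pct
# and the result is then center-cropped back to shortest_edge.
shortest_edge, crop_pct = 224, 224 / 256
assert int(shortest_edge / crop_pct) == 256  # resize short side to 256, then crop 224x224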
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """Output that carries the text model's last hidden state plus a learned projection of it."""

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.base_model = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)

            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
| 501 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])

    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"

    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())


class TpuConfigTester(unittest.TestCase):
    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", output
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", output
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", output
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
| 501 | 1 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
A_ = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
A_ = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
A_ = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
def UpperCAmelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                # jiwer expects (truth, hypothesis)
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
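

# --- Editor's usage sketch (not part of the original metric file) ---
# Demonstrates that the iterative branch of `_compute` pools error counts across
# all pairs before dividing (a corpus-level WER), which is not the same as
# averaging per-sentence WERs. Assumes `jiwer` is installed locally.
if __name__ == "__main__":
    _preds = ["this is the prediction", "there is an other sample"]
    _refs = ["this is the reference", "there is another one"]

    _incorrect, _total = 0, 0
    for _p, _r in zip(_preds, _refs):
        _m = compute_measures(_r, _p)  # jiwer takes (truth, hypothesis)
        _incorrect += _m["substitutions"] + _m["deletions"] + _m["insertions"]
        _total += _m["substitutions"] + _m["deletions"] + _m["hits"]
    print(_incorrect / _total)  # -> 0.5, matching the docstring example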
| 29 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
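

# --- Editor's sketch of the round-trip the tests above exercise (the repo name
# and token below are hypothetical; real use needs a valid HF token) ---
def _push_and_reload_example():
    extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
    extractor.push_to_hub("my-test-feature-extractor", use_auth_token="hf_...")  # hypothetical
    reloaded = Wav2Vec2FeatureExtractor.from_pretrained("your-username/my-test-feature-extractor")
    # A successful round trip preserves the serialized configuration.
    assert reloaded.to_dict() == extractor.to_dict()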
| 1 | 0 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """Creates a set of `DataLoader`s for the `glue` dataset."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
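

# --- Editor's note on the optimizer/scheduler selection above (sketch) ---
# When the active DeepSpeed JSON config already declares `optimizer` or
# `scheduler` blocks, `accelerator.prepare` builds the real objects from that
# config, so the script hands over `DummyOptim`/`DummyScheduler` placeholders
# instead of torch objects; the dummies only carry hints such as `lr`,
# `total_num_steps` and `warmup_num_steps`. Passing a real AdamW while the
# DeepSpeed config defines its own optimizer fails at `prepare` time.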
| 625 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
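

# --- Editor's sketch of the lazy-import pattern used above (simplified) ---
# `_LazyModule` defers the heavy framework imports until an attribute is first
# touched. A minimal stand-in built on module-level `__getattr__` (PEP 562):
#
#   import importlib
#
#   _import_structure = {"tokenization_clip": ["CLIPTokenizer"]}
#   _name_to_module = {n: m for m, names in _import_structure.items() for n in names}
#
#   def __getattr__(name):
#       if name in _name_to_module:
#           module = importlib.import_module("." + _name_to_module[name], __package__)
#           return getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
#
# so `from transformers.models.clip import CLIPTokenizer` only imports the
# tokenizer submodule, and e.g. the torch-backed modeling files stay untouched.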
| 625 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 92 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin


if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowercase : int ={'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase,
            model_name="facebook/m2m100_418M",
            revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_save_pretrained(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
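

# --- Editor's sketch (downloads facebook/m2m100_418M; a summary of what the
# mode-switching tests above assert) ---
# M2M100 prefixes encoder inputs with the *source* language token and, at
# generation time, forces the decoder to start with the *target* language token.
def _m2m100_translation_sketch():
    tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
    enc = tokenizer("A test", return_tensors="pt")
    assert enc["input_ids"][0][0].item() == tokenizer.get_lang_id("en")  # __en__ prefix
    forced_bos_token_id = tokenizer.get_lang_id("fr")  # pass to model.generate(...)
    return enc, forced_bos_token_id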
| 92 | 1 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , A )
    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)

        batch_size, seq_len = preds.shape

        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)
    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
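

# --- Editor's sketch of `align_predictions` on toy data (hypothetical labels) ---
# Positions whose gold label is the CrossEntropyLoss ignore index (-100) are
# dropped, so seqeval only scores the first sub-token of each word.
def _align_predictions_toy_example():
    logits = np.array([[[0.1, 2.0], [3.0, 0.2], [0.0, 0.0], [0.0, 0.0]]])  # (1, 4, 2)
    label_ids = np.array([[1, 0, -100, -100]])
    label_map = {0: "O", 1: "B-PER"}

    preds = np.argmax(logits, axis=2)  # -> [[1, 0, 0, 0]]
    preds_list = [[label_map[p] for p, l in zip(preds[0], label_ids[0]) if l != -100]]
    out_label_list = [[label_map[l] for l in label_ids[0] if l != -100]]
    return preds_list, out_label_list  # ([["B-PER", "O"]], [["B-PER", "O"]])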
| 24 |
"""simple docstring"""
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class CvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
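

# --- Editor's note on the spatial-size arithmetic checked in the tester above ---
# Each CvT stage embeds with a strided convolution, so the feature-map side
# follows the usual conv formula floor((size + 2*pad - kernel) / stride) + 1.
# For the tester defaults (image_size=64):
#   stage 0: floor((64 + 2*2 - 7) / 4) + 1 = 16
#   stage 1: floor((16 + 2*1 - 3) / 2) + 1 = 8
#   stage 2: floor(( 8 + 2*1 - 3) / 2) + 1 = 4
# which is why the final hidden state has shape (batch, embed_dim[-1], 4, 4) here.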
| 24 | 1 |
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
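

# --- Editor's usage sketch ---
# Each pass bubbles the largest element of the active window to the end, so the
# recursion shrinks `length` by one; a pass with no swaps terminates early.
#
#   >>> bubble_sort([5, 1, 4, 2, 8])
#   [1, 2, 4, 5, 8]
#   >>> bubble_sort([])  # empty input: range(-1) is empty, so it returns []
#   []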
| 477 |
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)
class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []

        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
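

# --- Editor's sketch of invoking the command programmatically (hypothetical
# file names; the CLI normally builds `args` via argparse) ---
#
#   from argparse import Namespace
#
#   args = Namespace(
#       task="sentiment-analysis", model=None, config=None, tokenizer=None,
#       device=-1, input="reviews.csv", output="scored.csv",
#       column="text", format="infer", overwrite=False,
#   )
#   run_command_factory(args).run()
#
# With `format="infer"`, `try_infer_format_from_ext` maps ".csv" -> "csv" via
# PipelineDataFormat.SUPPORTED_FORMATS before the reader is constructed.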
| 477 | 1 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
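

# --- Editor's verification sketch (hypothetical output path) ---
# After conversion, the tensor should only be reachable under the new key:
#
#   d = torch.load(os.path.join("./DialoGPT-small", WEIGHTS_NAME))
#   assert NEW_KEY in d and OLD_KEY not in d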
| 252 |
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    r"""
    Constructs a TVLT processor which wraps a TVLT image processor and TVLT feature extractor into a single processor.
    """

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
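

# --- Editor's usage sketch (hypothetical inputs; the checkpoint name below is
# an assumption and the arrays are dummies) ---
#
#   import numpy as np
#
#   processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
#   video = [np.zeros((8, 3, 224, 224))]          # list of (frames, C, H, W) clips
#   audio = [np.zeros(10_000, dtype=np.float32)]  # raw waveforms
#   batch = processor(images=video, audio=audio, sampling_rate=44_100)
#
# `batch` holds the union of the image-processor and feature-extractor outputs
# (pixel values/masks plus audio values/masks), because `__call__` above simply
# merges the two sub-processors' dicts.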
| 252 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """
    Copy/paste/tweak the original DINO weights into our ViT structure.
    """
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
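# Example invocation (script filename assumed):
#   python convert_dino_to_vit.py --model_name dino_vitb16 --pytorch_dump_folder_path ./dino_vitb16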
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 506 |
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
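    """
    Calculate the date of Easter for a given year using Gauss's computus.

    The remainders of ``year`` modulo 19, 4 and 7 track the Metonic lunar
    cycle and the Julian leap-year pattern, while the century-based terms
    correct for the Gregorian reform. Together they locate the Paschal Full
    Moon; Easter is the following Sunday, with two special-cased dates
    (April 19 and April 18) required by the Gregorian rules.

    >>> gauss_easter(2023).strftime("%m-%d")
    '04-09'
    """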
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
print(f"""Easter in {year} {tense} {gauss_easter(year)}""")
| 37 | 0 |
from __future__ import annotations
import math
__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"
def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
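    """
    Convert a 3D point (x, y, z) to its 2D perspective projection: each
    coordinate is shrunk by the factor distance / (z + distance) and then
    scaled, so points deeper along z land closer to the origin.

    >>> px, py = convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0)
    >>> f"{px:.4f}, {py:.4f}"
    '7.6923, 15.3846'
    """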
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
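    """
    Rotate the point (x, y, z) about the named axis by ``angle``: the two
    coordinates lying in the plane of rotation go through the standard 2D
    rotation matrix while the coordinate on the axis is left unchanged.
    Note that the module folds the angle into [0, 360) and applies its own
    450-based degree-to-radian convention before the trigonometry.
    """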
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
| 407 |
def solution(n: int = 100) -> int:
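    """
    Project Euler 6: return the difference between the square of the sum
    and the sum of the squares of the first n natural numbers. Closed
    forms exist (sum = n(n + 1) / 2, sum of squares = n(n + 1)(2n + 1) / 6),
    but the straightforward O(n) loop below is plenty fast for n = 100.

    >>> solution(10)
    2640
    """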
    sum_of_squares = 0
    sum_of_ints = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f"{solution() = }")
| 407 | 1 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
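# A circular singly linked list: the tail's ``next`` pointer wraps back to the
# head, so traversal must stop once it returns to the node it started from.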
class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self) -> None:
        self.head: Node | None = None
        self.tail: Node | None = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    """
    >>> test_circular_linked_list()
    """
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 |
from math import factorial
def combinations(n: int, k: int) -> int:
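    """
    Number of ways to choose k items from n when order does not matter,
    i.e. the binomial coefficient C(n, k) = n! / (k! * (n - k)!).

    >>> combinations(52, 5)
    2598960
    """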
# If either of the conditions are true, the function is being asked
# to calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError('''Please enter positive integers for n and k where n >= k''' )
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
f'''fifty-two card deck is: {combinations(5_2, 5)}\n''',
)
print(
'If a class of 40 students must be arranged into groups of',
f'''4 for group projects, there are {combinations(4_0, 4)} ways''',
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
f'''are {combinations(1_0, 3)} ways that first, second and''',
'third place can be awarded.',
)
| 64 | 1 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
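# These tests execute each local metric's docstring examples against the
# ./metrics folder, with heavyweight model calls (BLEURT, BERTScore, COMET)
# patched out so the doctests stay fast and offline.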
# mark all tests as integration
pytestmark = pytest.mark.integration


REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper
def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished


@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model which is supposed to download a comet checkpoint
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        # mock load_from_checkpoint which is supposed to load the downloaded checkpoint
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
| 700 |
def and_gate(input_1: int, input_2: int) -> int:
    """AND gate: returns 1 only when both inputs are 1 (a 0 in either input forces 0)."""
    return int((input_1, input_2).count(0) == 0)
def test_and_gate() -> None:
    """Exhaustively check the two-input AND truth table."""
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 633 | 0 |
'''simple docstring'''
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
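    """
    Project Euler 174: a square lamina is a square outline with a square
    hole, using t = a**2 - b**2 tiles for outer side a and hole side b of
    the same parity. Count, for t <= t_limit, how many tile totals can be
    formed as between 1 and n_limit distinct laminae.
    """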
    count = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1

        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(f'{solution() = }')
| 546 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 546 | 1 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
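# This script fine-tunes a GLUE/MRPC classifier under `accelerate launch` with an
# optional DeepSpeed config, and can assert a minimum accuracy via
# --performance_lower_bound (used by the CI performance tests).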
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 720 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
"linear": PIL.Image.Resampling.BILINEAR,
"bilinear": PIL.Image.Resampling.BILINEAR,
"bicubic": PIL.Image.Resampling.BICUBIC,
"lanczos": PIL.Image.Resampling.LANCZOS,
"nearest": PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
"linear": PIL.Image.LINEAR,
"bilinear": PIL.Image.BILINEAR,
"bicubic": PIL.Image.BICUBIC,
"lanczos": PIL.Image.LANCZOS,
"nearest": PIL.Image.NEAREST,
}
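# ``PIL_INTERPOLATION`` lets callers pick a resampling filter by name while
# staying compatible with both the pre- and post-9.1.0 Pillow enum locations.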
def pt_to_pil(images):
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images
def numpy_to_pil(images):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
| 465 | 0 |
import os
import pytest
from attr import dataclass
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self) -> list:
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"loss.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transformers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
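# Test classes declare a ``framework`` attribute and request this fixture to get
# a ready-to-use ``self.env`` carrying the role, hyperparameters and image URI.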
| 64 |
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
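    """
    Newton-Laplace formula for the speed of sound in a fluid:
    c = sqrt(K / rho), with bulk modulus K in pascals and density rho in
    kg/m^3, giving c in m/s.

    >>> round(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9))
    1468
    """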
if density <= 0:
raise ValueError('''Impossible fluid density''' )
if bulk_modulus <= 0:
raise ValueError('''Impossible bulk modulus''' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 | 1 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original Conditional DETR weights into our structure.
    """
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # NOTE: re-keying convention assumed from the analogous DETR converter
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 426 |
'''simple docstring'''
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """
    BigBirdForQuestionAnswering with a CLS head on top for predicting the category.

    This way we can load its weights with FlaxBigBirdForQuestionAnswering.
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        num_classes = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(num_classes)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class snake_case_ :
"""simple docstring"""
__lowerCAmelCase : Args
__lowerCAmelCase : Callable
__lowerCAmelCase : Callable
__lowerCAmelCase : Callable
__lowerCAmelCase : Callable
__lowerCAmelCase : wandb
__lowerCAmelCase : Callable =None
def __UpperCAmelCase ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None):
lowerCamelCase__ = model.params
lowerCamelCase__ = TrainState.create(
apply_fn=model.__call__ , params=UpperCamelCase , tx=UpperCamelCase , loss_fn=UpperCamelCase , )
if ckpt_dir is not None:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = restore_checkpoint(UpperCamelCase , UpperCamelCase)
lowerCamelCase__ = {
"lr": args.lr,
"init_lr": args.init_lr,
"warmup_steps": args.warmup_steps,
"num_train_steps": num_train_steps,
"weight_decay": args.weight_decay,
}
lowerCamelCase__ , lowerCamelCase__ = build_tx(**UpperCamelCase)
lowerCamelCase__ = train_state.TrainState(
step=UpperCamelCase , apply_fn=model.__call__ , params=UpperCamelCase , tx=UpperCamelCase , opt_state=UpperCamelCase , )
lowerCamelCase__ = args
lowerCamelCase__ = data_collator
lowerCamelCase__ = lr
lowerCamelCase__ = params
lowerCamelCase__ = jax_utils.replicate(UpperCamelCase)
return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    """Load model params, optimizer state, step count, args, and data collator from `save_dir`."""
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    """Linear warmup from `init_lr` to `lr`, then linear decay toward 1e-7."""
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    """Build the AdamW optimizer with the warmup+decay schedule, masking biases and LayerNorm from weight decay."""

    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
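
# --- Illustrative sketch (not part of the original script) ---
# The schedule built by `scheduler_fn` above ramps linearly from `init_lr` to
# `lr` over `warmup_steps`, then decays linearly toward 1e-7. The step counts
# and learning rates below are arbitrary example values for the demonstration.
import optax

example_schedule = optax.join_schedules(
    schedules=[
        optax.linear_schedule(init_value=0.0, end_value=3e-5, transition_steps=100),
        optax.linear_schedule(init_value=3e-5, end_value=1e-7, transition_steps=900),
    ],
    boundaries=[100],
)
for step in (0, 50, 100, 500, 1000):
    print(step, float(example_schedule(step)))  # rises to 3e-5 by step 100, then decays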
| 426 | 1 |
"""simple docstring"""
import qiskit
def UpperCAmelCase ( _lowercase : int = 2 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
lowerCAmelCase_ = qubits
# Using Aer's simulator
lowerCAmelCase_ = qiskit.Aer.get_backend('''aer_simulator''' )
# Creating a Quantum Circuit acting on the q register
lowerCAmelCase_ = qiskit.QuantumCircuit(_lowercase , _lowercase )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , _lowercase ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , _lowercase )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(_lowercase ) ) , list(range(_lowercase ) ) )
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
lowerCAmelCase_ = qiskit.execute(_lowercase , _lowercase , shots=1_0_0_0 )
return job.result().get_counts(_lowercase )
if __name__ == "__main__":
print(f"""Total count for various states are: {quantum_entanglement(3)}""") | 552 |
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope) | 552 | 1 |
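
# Usage sketch (the filename "spiece.model" is an assumption for illustration):
# the ModelProto class injected into this module's globals by `_builder` above
# can parse a trained SentencePiece model and expose its vocabulary pieces.
m = ModelProto()
with open("spiece.model", "rb") as f:
    m.ParseFromString(f.read())
print(m.trainer_spec.model_type, len(m.pieces))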
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
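
# Usage sketch: thanks to the `_LazyModule` indirection above, the heavy torch
# submodules are only imported when these names are first accessed.
# ("facebook/m2m100_418M" is a public example checkpoint, not referenced by this file.)
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")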
| 708 | """simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, file_path: str):
        r"""Find instances where a non-binary file is opened without UTF-8 encoding."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)

        return match

    def _no_print_statements(self, file_path: str):
        r"""Find print statements that are not commented out or inside a string."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)

        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
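
# Quick illustration of the encoding check above (a sketch, not part of the
# test suite): the regex flags `open(...)` calls that pass neither an
# `encoding=` argument nor a binary/write-mode keyword.
_pattern = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
assert _pattern.search(' f = open("data.txt")') is not None
assert _pattern.search(' f = open("data.txt", encoding="utf-8")') is None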
| 67 | 0 |
"""simple docstring"""
UpperCAmelCase__ = {str(digit): digit**5 for digit in range(1_0)}
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(lowercase ) )
def __UpperCAmelCase ( ):
"""simple docstring"""
return sum(
number
for number in range(10_00 ,1_00_00_00 )
if number == digits_fifth_powers_sum(lowercase ) )
if __name__ == "__main__":
print(solution())
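
# Worked check (illustrative): 4150 is one of the numbers counted above, since
# 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.
assert digits_fifth_powers_sum(4150) == 4150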
| 277 | """simple docstring"""
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    """Calculate the waiting time of each process under shortest-remaining-time-first."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    """Calculate the turnaround time of each process."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(
    waiting_time: list[int], turn_around_time: list[int], no_of_processes: int
) -> None:
    """Print the average waiting time and average turnaround time."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
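
# Non-interactive usage sketch (example values, not from the original):
# three processes arriving at t=0, 1, 2 with burst times 3, 1, 2 give
# SRTF waiting times [1, 0, 2] and turnaround times [4, 1, 4].
example_wt = calculate_waitingtime([0, 1, 2], [3, 1, 2], 3)
example_tat = calculate_turnaroundtime([3, 1, 2], 3, example_wt)
print(example_wt, example_tat)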
| 277 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    """Return the area of the grid whose rectangle count is closest to `target`."""
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(F"{solution() = }") | 492 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """
    Output class for text-to-video pipelines.

    Args:
        frames (`List[np.ndarray]` or `torch.FloatTensor`):
            List of denoised frames (essentially images) as NumPy arrays or as a torch tensor.
    """

    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline | 492 | 1 |
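
# Usage sketch (hedged: the checkpoint name is a public example, not something
# referenced by this file, and a CUDA device is assumed to be available):
import torch
from diffusers import TextToVideoSDPipeline

pipe = TextToVideoSDPipeline.from_pretrained(
    "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16
).to("cuda")
frames = pipe("an astronaut riding a horse", num_inference_steps=25).frames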