| code | code_codestyle | style_context | style_context_codestyle | label |
| --- | --- | --- | --- | --- |
| stringlengths 82-53.2k | int64 0-721 | stringlengths 91-41.9k | int64 0-699 | int64 0-1 |
from __future__ import annotations

import typing
from collections import Counter


def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
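
# Illustrative sanity check for the reconstructed helpers above; the expected
# counts are worked out by hand from the classic small triples
# (3-4-5 -> perimeter 12, 6-8-10 -> 24, 5-12-13 -> 30).
counts = pythagorean_triple(30)
assert counts[12] == 1
assert counts[24] == 1
assert counts[30] == 1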
import argparse
import os

import transformers

from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging


logging.set_verbosity_info()

logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}


def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
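
# Hypothetical invocation of convert_slow_checkpoint_to_fast above; the
# tokenizer/checkpoint names are illustrative, not taken from the original
# script. "BertTokenizer" is one of the SLOW_TO_FAST_CONVERTERS keys.
convert_slow_checkpoint_to_fast(
    tokenizer_name="BertTokenizer",
    checkpoint_name="bert-base-uncased",
    dump_path="./fast_tokenizers",
    force_download=False,
)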
import argparse
from pathlib import Path

import torch

from transformers import OPTConfig, OPTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    # unwrap checkpoints saved as {"model": state_dict}
    if "model" in sd.keys():
        sd = sd["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # metaseq's `SequenceParallelTransformerBlock` stores the fused QKV weight as K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()

    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
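
# Minimal sketch of the fused-QKV split performed in load_checkpoint above on a
# toy tensor: the fused projection stacks three equal-sized blocks along dim 0
# (K, V, Q in metaseq's layout), so three equal chunks recover the projections.
import torch

fused = torch.arange(18.0).reshape(6, 3)  # depth=6, i.e. three blocks of 2 rows
k, v, q = torch.split(fused, fused.shape[0] // 3, dim=0)
assert k.shape == v.shape == q.shape == (2, 3)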
import argparse

from torch import nn

# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
    ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
    XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)

from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging


logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute != "":
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path to the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()

    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring"""
def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : int ):
'''simple docstring'''
if mass < 0:
raise ValueError('The mass of a body cannot be negative' )
return 0.5 * mass * abs(UpperCAmelCase_ ) * abs(UpperCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 599 |
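
# Worked example for kinetic_energy above, checked by hand:
# KE = 0.5 * m * v**2, so 10 kg at 10 m/s carries 0.5 * 10 * 100 = 500.0 J,
# and the sign of the velocity does not matter.
assert kinetic_energy(10, 10) == 500.0
assert kinetic_energy(10, -10) == 500.0
assert kinetic_energy(0, 5) == 0.0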
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
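
# Hand-traced check of minimax above on a depth-2 tree with four leaves:
# the maximizer picks the larger of min(3, 5) = 3 and min(2, 9) = 2, i.e. 3.
leaves = [3, 5, 2, 9]
assert minimax(0, 0, True, leaves, math.log(len(leaves), 2)) == 3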
import os
from pathlib import Path
from unittest.mock import patch

import pytest
import zstandard as zstd

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
    OfflineModeIsEnabled,
    cached_path,
    fsspec_get,
    fsspec_head,
    ftp_get,
    ftp_head,
    get_from_cache,
    http_get,
    http_head,
)


FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert cached_path(text_file_abs) == text_file
    # relative path
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_rel) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
from math import log2


def lowest_set_bit(a: int) -> int:
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(a, float):
        raise TypeError("Input value must be a 'int' type")
    return 0 if (a == 0) else int(log2(a & -a))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
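
# Worked examples of the bit trick used above: a & -a isolates the rightmost
# set bit, and log2 of that power of two gives its index.
assert lowest_set_bit(1) == 0   # 0b0001
assert lowest_set_bit(4) == 2   # 0b0100
assert lowest_set_bit(12) == 2  # 0b1100 -> a & -a == 0b0100
assert lowest_set_bit(0) == 0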
from __future__ import annotations

import unittest

from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel


@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)


def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
# limitations under the License.


from typing import Optional, Tuple, Union

import torch

from diffusers import DiffusionPipeline, ImagePipelineOutput


class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
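
# Hypothetical usage sketch for the pipeline above; the tiny UNet/scheduler
# configuration is illustrative, and the class name follows the reconstruction
# above. Any UNet2DModel/scheduler pair with a matching sample size should
# work the same way.
from diffusers import DDPMScheduler, UNet2DModel

unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
scheduler = DDPMScheduler(num_train_timesteps=1000)
pipe = CustomLocalPipeline(unet=unet, scheduler=scheduler)
output, note = pipe(batch_size=1, num_inference_steps=2)  # note == "This is a local test"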
import json
import os
from datetime import date
from pathlib import Path

from tabulate import DataRow, TableFormat, tabulate


hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)

failed = []
group_info = []

no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}

payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]

total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f"{line['duration']:.4f}"
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_files2failed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            files2failed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in files2failed:
                    files2failed[data[0]] = [data[1:]]
                else:
                    files2failed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(files2failed[file])])

            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_files2failed.append(files2failed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)

if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
        date_report = {
            "type": "context",
            "elements": [
                {
                    "type": "plain_text",
                    "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
                }
            ],
        }
        payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    for failed_file in all_files2failed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""
            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
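
# Small sketch of the custom TableFormat defined above: it renders rows as
# pipe-separated lines with no horizontal rules, which fits inside a Slack
# code block.
rows = [["test_foo.py", 2], ["test_bar.py", 1]]
print(tabulate(rows, headers=["Test Location", "Num Failed"], tablefmt=hf_table_format, stralign="right"))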
import os
import re
import shutil
from argparse import ArgumentParser, Namespace

from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger


HIGHLIGHT_MESSAGE_PRE = """<<<<<<< This should probably be modified because it mentions: """

HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""

TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]


def convert_command_factory(args: Namespace):
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {utils_file} to {dest_folder}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
A : int = [
'good first issue',
'feature request',
'wip',
]
def snake_case__ ( ):
"""simple docstring"""
UpperCamelCase__ = Github(os.environ["GITHUB_TOKEN"] )
UpperCamelCase__ = g.get_repo("huggingface/accelerate" )
UpperCamelCase__ = repo.get_issues(state="open" )
for issue in open_issues:
UpperCamelCase__ = sorted([comment for comment in issue.get_comments()] , key=lambda _snake_case : i.created_at , reverse=_snake_case )
UpperCamelCase__ = comments[0] if len(_snake_case ) > 0 else None
UpperCamelCase__ = dt.utcnow()
UpperCamelCase__ = (current_time - issue.updated_at).days
UpperCamelCase__ = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="closed" )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main() | 516 | """simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
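
# Hedged usage sketch for the re-exported pipelines above; the checkpoint id
# "shi-labs/versatile-diffusion" is the public Versatile Diffusion release,
# but treat the exact call sequence as illustrative.
from diffusers import VersatileDiffusionTextToImagePipeline

pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
pipe.remove_unused_weights()
image = pipe("an astronaut riding a horse on mars").images[0]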
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class A__( enum.Enum ):
lowerCAmelCase = 0
lowerCAmelCase = 1
lowerCAmelCase = 2
@add_end_docstrings(__magic_name__ )
class A__( __magic_name__ ):
lowerCAmelCase = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__( self : Union[str, Any] , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : List[str] ) -> Optional[Any]:
"""simple docstring"""
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
__SCREAMING_SNAKE_CASE = None
if self.model.config.prefix is not None:
__SCREAMING_SNAKE_CASE = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
__SCREAMING_SNAKE_CASE = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self._sanitize_parameters(prefix=__SCREAMING_SNAKE_CASE , **self._forward_params )
__SCREAMING_SNAKE_CASE = {**self._preprocess_params, **preprocess_params}
__SCREAMING_SNAKE_CASE = {**self._forward_params, **forward_params}
def _a ( self : Any , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Any=None , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {}
if prefix is not None:
__SCREAMING_SNAKE_CASE = prefix
if prefix:
__SCREAMING_SNAKE_CASE = self.tokenizer(
__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__SCREAMING_SNAKE_CASE = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
''' [None, \'hole\']''' )
__SCREAMING_SNAKE_CASE = handle_long_generation
preprocess_params.update(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = generate_kwargs
__SCREAMING_SNAKE_CASE = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
__SCREAMING_SNAKE_CASE = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
__SCREAMING_SNAKE_CASE = ReturnType.TENSORS
if return_type is not None:
__SCREAMING_SNAKE_CASE = return_type
if clean_up_tokenization_spaces is not None:
__SCREAMING_SNAKE_CASE = clean_up_tokenization_spaces
if stop_sequence is not None:
__SCREAMING_SNAKE_CASE = self.tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
__SCREAMING_SNAKE_CASE = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _a ( self : Union[str, Any] , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : Optional[Any] ) -> int:
"""simple docstring"""
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def __call__( self : Dict , __SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str]="" , __SCREAMING_SNAKE_CASE : Optional[int]=None , **__SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.tokenizer(
prefix + prompt_text , padding=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__SCREAMING_SNAKE_CASE = prompt_text
if handle_long_generation == "hole":
__SCREAMING_SNAKE_CASE = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
__SCREAMING_SNAKE_CASE = generate_kwargs['''max_new_tokens''']
else:
__SCREAMING_SNAKE_CASE = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
__SCREAMING_SNAKE_CASE = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
''' models max length''' )
__SCREAMING_SNAKE_CASE = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
__SCREAMING_SNAKE_CASE = inputs['''attention_mask'''][:, -keep_length:]
return inputs
def _a ( self : int , __SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = model_inputs['''input_ids''']
__SCREAMING_SNAKE_CASE = model_inputs.get('''attention_mask''' , __SCREAMING_SNAKE_CASE )
# Allow empty prompts
if input_ids.shape[1] == 0:
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = 1
else:
__SCREAMING_SNAKE_CASE = input_ids.shape[0]
__SCREAMING_SNAKE_CASE = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
__SCREAMING_SNAKE_CASE = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
__SCREAMING_SNAKE_CASE = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
__SCREAMING_SNAKE_CASE = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
__SCREAMING_SNAKE_CASE = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
__SCREAMING_SNAKE_CASE = self.model.generate(input_ids=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = generated_sequence.shape[0]
if self.framework == "pt":
__SCREAMING_SNAKE_CASE = generated_sequence.reshape(__SCREAMING_SNAKE_CASE , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
__SCREAMING_SNAKE_CASE = tf.reshape(__SCREAMING_SNAKE_CASE , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def _a ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=ReturnType.FULL_TEXT , __SCREAMING_SNAKE_CASE : List[str]=True ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = model_outputs['''generated_sequence'''][0]
__SCREAMING_SNAKE_CASE = model_outputs['''input_ids''']
__SCREAMING_SNAKE_CASE = model_outputs['''prompt_text''']
__SCREAMING_SNAKE_CASE = generated_sequence.numpy().tolist()
__SCREAMING_SNAKE_CASE = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
__SCREAMING_SNAKE_CASE = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
__SCREAMING_SNAKE_CASE = self.tokenizer.decode(
__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
__SCREAMING_SNAKE_CASE = 0
else:
__SCREAMING_SNAKE_CASE = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE , ) )
if return_type == ReturnType.FULL_TEXT:
__SCREAMING_SNAKE_CASE = prompt_text + text[prompt_length:]
else:
__SCREAMING_SNAKE_CASE = text[prompt_length:]
__SCREAMING_SNAKE_CASE = {'''generated_text''': all_text}
records.append(__SCREAMING_SNAKE_CASE )
return records
| 690 |
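
# Usage sketch for the TextGenerationPipeline above through the high-level
# pipeline() factory; the "gpt2" model id is illustrative.
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
print(generator("Hello, I'm a language model,", max_new_tokens=20)[0]["generated_text"])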
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase__ =logging.get_logger(__name__)
@add_end_docstrings(__magic_name__ )
class A__( __magic_name__ ):
def __init__( self : Optional[Any] , **__SCREAMING_SNAKE_CASE : str ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str], "Image", List["Image"]] , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _a ( self : int , **__SCREAMING_SNAKE_CASE : int ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {}
if "candidate_labels" in kwargs:
__SCREAMING_SNAKE_CASE = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
__SCREAMING_SNAKE_CASE = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def _a ( self : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Optional[int]="This is a photo of {}." ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = load_image(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.image_processor(images=[image] , return_tensors=self.framework )
__SCREAMING_SNAKE_CASE = candidate_labels
__SCREAMING_SNAKE_CASE = [hypothesis_template.format(__SCREAMING_SNAKE_CASE ) for x in candidate_labels]
__SCREAMING_SNAKE_CASE = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework , padding=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = [text_inputs]
return inputs
def _a ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = model_inputs.pop('''candidate_labels''' )
__SCREAMING_SNAKE_CASE = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = text_inputs[0]
else:
# Batching case.
__SCREAMING_SNAKE_CASE = text_inputs[0][0]
__SCREAMING_SNAKE_CASE = self.model(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def _a ( self : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = model_outputs.pop('''candidate_labels''' )
__SCREAMING_SNAKE_CASE = model_outputs['''logits'''][0]
if self.framework == "pt":
__SCREAMING_SNAKE_CASE = logits.softmax(dim=-1 ).squeeze(-1 )
__SCREAMING_SNAKE_CASE = probs.tolist()
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = [scores]
elif self.framework == "tf":
__SCREAMING_SNAKE_CASE = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
__SCREAMING_SNAKE_CASE = probs.numpy().tolist()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
__SCREAMING_SNAKE_CASE = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , key=lambda __SCREAMING_SNAKE_CASE : -x[0] )
]
return result
| 690 | 1 |
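
# Usage sketch for the zero-shot image classification pipeline above; the CLIP
# checkpoint and COCO image URL are the ones commonly used in examples.
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
preds = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["a photo of a cat", "a photo of a dog"],
)
print(preds)  # list of {"score": ..., "label": ...}, sorted by score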
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
a = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class lowercase_ ( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''albert'''
    def __init__( self , vocab_size=30_000 , embedding_size=128 , hidden_size=4_096 , num_hidden_layers=12 , num_hidden_groups=1 , num_attention_heads=64 , intermediate_size=16_384 , inner_group_num=1 , hidden_act="gelu_new" , hidden_dropout_prob=0 , attention_probs_dropout_prob=0 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , classifier_dropout_prob=0.1 , position_embedding_type="absolute" , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
# Renamed from the duplicate "lowercase_" so it does not shadow the config class above.
class lowercase_onnx ( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ] )
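# Usage sketch (class names follow this file's obfuscated scheme; values are the
# defaults above). Shown commented out to avoid side effects at import time:
#
# config = lowercase_(vocab_size=30_000, hidden_size=4_096)
# print(config.model_type)  # "albert"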
| 7 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class _SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    '''simple docstring'''
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self , vocab_size=50_257 , n_positions=1_024 , n_embd=768 , n_layer=12 , n_head=12 , n_inner=None , activation_function="gelu_pytorch_tanh" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , attention_softmax_in_fp32=True , scale_attention_softmax_in_fp32=True , multi_query=True , **kwargs , ) -> int:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
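# Usage sketch (the config class above; "hidden_size" resolves through attribute_map
# to n_embd). Shown commented out to avoid side effects at import time:
#
# config = _SCREAMING_SNAKE_CASE(n_layer=24, multi_query=True)
# print(config.hidden_size)  # 768, i.e. n_embd via the attribute_map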
| 580 | 0 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
A = get_tests_dir('fixtures')
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
    def test_cached_files_are_used_when_internet_is_down( self ):
        """simple docstring"""
        response_mock = mock.Mock()
        response_mock.status_code = 5_00
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request' , return_value=response_mock ) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url( self ):
        """simple docstring"""
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
    @classmethod
    def setUpClass( cls ):
        """simple docstring"""
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
    @classmethod
    def tearDownClass( cls ):
        """simple docstring"""
        try:
            delete_repo(token=cls._token , repo_id='test-feature-extractor' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id='valid_org/test-feature-extractor-org' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id='test-dynamic-feature-extractor' )
        except HTTPError:
            pass
    def test_push_to_hub( self ):
        """simple docstring"""
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(A )
        feature_extractor.push_to_hub('test-feature-extractor' , use_auth_token=self._token )
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='test-feature-extractor' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir , repo_id='test-feature-extractor' , push_to_hub=True , use_auth_token=self._token )
            new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" )
            for k, v in feature_extractor.__dict__.items():
                self.assertEqual(v , getattr(new_feature_extractor , k ) )
    def test_push_to_hub_in_organization( self ):
        """simple docstring"""
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(A )
        feature_extractor.push_to_hub('valid_org/test-feature-extractor' , use_auth_token=self._token )
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='valid_org/test-feature-extractor' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir , repo_id='valid_org/test-feature-extractor-org' , push_to_hub=True , use_auth_token=self._token )
            new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
            for k, v in feature_extractor.__dict__.items():
                self.assertEqual(v , getattr(new_feature_extractor , k ) )
    def test_push_to_hub_dynamic_feature_extractor( self ):
        """simple docstring"""
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(A )
        feature_extractor.push_to_hub('test-dynamic-feature-extractor' , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map , {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'} , )
        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"""{USER}/test-dynamic-feature-extractor""" , trust_remote_code=True )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__ , 'CustomFeatureExtractor' )
| 717 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
# Renamed from the duplicate "SCREAMING_SNAKE_CASE" so the pipeline class below can
# reference it without shadowing.
class LearnedClassifierFreeSamplingEmbeddings ( ModelMixin , ConfigMixin ):
    """simple docstring"""
    @register_to_config
    def __init__( self , learnable , hidden_size = None , length = None ):
        """simple docstring"""
        super().__init__()
        self.learnable = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length , hidden_size )
        else:
            embeddings = None
        self.embeddings = torch.nn.Parameter(embeddings )
class SCREAMING_SNAKE_CASE ( DiffusionPipeline ):
    """simple docstring"""
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler
    def __init__( self , vqvae , text_encoder , tokenizer , transformer , scheduler , learned_classifier_free_sampling_embeddings , ):
        """simple docstring"""
        super().__init__()
        self.register_modules(
            vqvae=vqvae , transformer=transformer , text_encoder=text_encoder , tokenizer=tokenizer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
    def _encode_prompt( self , prompt , num_images_per_prompt , do_classifier_free_guidance ):
        """simple docstring"""
        batch_size = len(prompt ) if isinstance(prompt , list ) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=True )
        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0 ).repeat(batch_size , 1 , 1 )
            else:
                uncond_tokens = [''] * batch_size
                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens , padding='max_length' , max_length=max_length , truncation=True , return_tensors='pt' , )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=True )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1 , num_images_per_prompt , 1 )
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt , seq_len , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds] )
        return prompt_embeds
    @torch.no_grad()
    def __call__( self , prompt , num_inference_steps = 1_00 , guidance_scale = 5.0 , truncation_rate = 1.0 , num_images_per_prompt = 1 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , ):
        """simple docstring"""
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(prompt )}""" )
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds = self._encode_prompt(prompt , num_images_per_prompt , do_classifier_free_guidance )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(callback_steps )}.""" )
        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape , mask_class ).to(self.device )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    'Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'
                    f""" {self.transformer.num_vector_embeds - 1} (inclusive).""" )
            latents = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
        sample = latents
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input , encoder_hidden_states=prompt_embeds , timestep=t ).sample
            if do_classifier_free_guidance:
                model_output_uncond , model_output_text = model_output.chunk(2 )
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output , dim=1 , keepdim=True )
            model_output = self.truncate(model_output , truncation_rate )
            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70 )
            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output , timestep=t , sample=sample , generator=generator ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , t , sample )
        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample , shape=embeddings_shape )
        image = self.vqvae.decode(embeddings , force_not_quantize=True ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
    def truncate( self , log_p_x_0 , truncation_rate ):
        """simple docstring"""
        sorted_log_p_x_0 , indices = torch.sort(log_p_x_0 , 1 , descending=True )
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0 )
        keep_mask = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :] , True )
        keep_mask = torch.cat((all_true, keep_mask) , dim=1 )
        keep_mask = keep_mask[:, :-1, :]
        keep_mask = keep_mask.gather(1 , indices.argsort(1 ) )
        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)
        return rv
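# Usage sketch (assumption: "microsoft/vq-diffusion-ithq" is a published checkpoint
# compatible with the pipeline class above). Shown commented out so importing the
# module stays side-effect free:
#
# pipe = SCREAMING_SNAKE_CASE.from_pretrained("microsoft/vq-diffusion-ithq")
# image = pipe("teddy bear playing in the pool", num_inference_steps=100).images[0]
# image.save("teddy.png")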
| 46 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/unispeech-sat-base-100h-libri-ft': (
        'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class lowercase__( PretrainedConfig ):
    """simple docstring"""
    model_type = 'unispeech-sat'
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , feat_quantizer_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , num_codevectors_per_group=320 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=100 , codevector_dim=256 , proj_codevector_dim=256 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , tdnn_dim=(512, 512, 512, 512, 1_500) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=512 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , num_clusters=504 , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio( self ):
        return functools.reduce(operator.mul , self.conv_stride , 1 )
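# Usage sketch (the config class above; values shown are its defaults). Shown
# commented out to avoid side effects at import time:
#
# config = lowercase__()
# print(config.inputs_to_logits_ratio)  # 320, the product of conv_stride (5*2*2*2*2*2*2)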
| 97 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset( dataset , expected_features ):
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_dataset_from_parquet_keep_in_memory( keep_in_memory , parquet_path , tmp_path ):
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_parquet_dataset(dataset , expected_features )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_dataset_from_parquet_features( features , parquet_path , tmp_path ):
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path , features=features , cache_dir=cache_dir ).read()
    _check_parquet_dataset(dataset , expected_features )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_dataset_from_parquet_split( split , parquet_path , tmp_path ):
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = ParquetDatasetReader(parquet_path , cache_dir=cache_dir , split=split ).read()
    _check_parquet_dataset(dataset , expected_features )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def test_dataset_from_parquet_path_type( path_type , parquet_path , tmp_path ):
    if issubclass(path_type , str ):
        path = parquet_path
    elif issubclass(path_type , list ):
        path = [parquet_path]
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = ParquetDatasetReader(path , cache_dir=cache_dir ).read()
    _check_parquet_dataset(dataset , expected_features )
def _check_parquet_datasetdict( dataset_dict , expected_features , splits=("train",) ):
    assert isinstance(dataset_dict , DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_parquet_datasetdict_reader_keep_in_memory( keep_in_memory , parquet_path , tmp_path ):
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {'''train''': parquet_path} , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_parquet_datasetdict(dataset , expected_features )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_parquet_datasetdict_reader_features( features , parquet_path , tmp_path ):
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = ParquetDatasetReader({'''train''': parquet_path} , features=features , cache_dir=cache_dir ).read()
    _check_parquet_datasetdict(dataset , expected_features )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_parquet_datasetdict_reader_split( split , parquet_path , tmp_path ):
    if split:
        path = {split: parquet_path}
    else:
        split = '''train'''
        path = {'''train''': parquet_path, '''test''': parquet_path}
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = ParquetDatasetReader(path , cache_dir=cache_dir ).read()
    _check_parquet_datasetdict(dataset , expected_features , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
def test_parquet_write( dataset , tmp_path ):
    writer = ParquetDatasetWriter(dataset , tmp_path / '''foo.parquet''' )
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / '''foo.parquet''' )
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features( shared_datadir , tmp_path ):
    image_path = str(shared_datadir / '''test_image_rgb.jpg''' )
    data = {'''image''': [image_path]}
    features = Features({'''image''': Image()} )
    dataset = Dataset.from_dict(data , features=features )
    writer = ParquetDatasetWriter(dataset , tmp_path / '''foo.parquet''' )
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=True ).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size( feature , expected ):
    assert get_writer_batch_size(feature ) == expected
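# Usage sketch (assumption: "data.parquet" has the col_1/col_2/col_3 layout used by
# the fixtures above). Shown commented out so importing the module stays side-effect free:
#
# from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter
# ds = ParquetDatasetReader("data.parquet", cache_dir="cache").read()
# ParquetDatasetWriter(ds, "copy.parquet").write()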
| 678 | 0 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = '''examples/'''
REPLACE_PATTERNS = {
    '''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
    '''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
    '''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
    '''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
REPLACE_FILES = {
    '''init''': '''src/transformers/__init__.py''',
    '''setup''': '''setup.py''',
}
README_FILE = '''README.md'''
def update_version_in_file( fname , version , pattern ):
    """simple docstring"""
    with open(fname , "r" , encoding="utf-8" , newline="\n" ) as f:
        code = f.read()
    re_pattern , replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION" , version )
    code = re_pattern.sub(replace , code )
    with open(fname , "w" , encoding="utf-8" , newline="\n" ) as f:
        f.write(code )
def update_version_in_examples( version ):
    """simple docstring"""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects" )
        if "legacy" in directories:
            directories.remove("legacy" )
        for fname in fnames:
            if fname.endswith(".py" ):
                update_version_in_file(os.path.join(folder , fname ) , version , pattern="examples" )
def global_version_update( version , patch=False ):
    """simple docstring"""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )
def clean_main_ref_in_model_list():
    """simple docstring"""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith("1." ):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , )
        index += 1
    with open(README_FILE , "w" , encoding="utf-8" , newline="\n" ) as f:
        f.writelines(lines )
def get_version():
    """simple docstring"""
    with open(REPLACE_FILES["init"] , "r" ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def pre_release_work( patch=False ):
    """simple docstring"""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = F"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(F"Which version are you releasing? [{default_version}]" )
    if len(version ) == 0:
        version = default_version
    print(F"Updating version to {version}." )
    global_version_update(version , patch=patch )
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`." )
        clean_main_ref_in_model_list()
def post_release_work():
    """simple docstring"""
    current_version = get_version()
    dev_version = F"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(F"Which version are we developing now? [{dev_version}]" )
    if len(version ) == 0:
        version = dev_version
    print(F"Updating version to {version}." )
    global_version_update(version )
    print("Cleaning main README, don't forget to run `make fix-copies`." )
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
    parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print('''Nothing to do after a patch :-)''')
    else:
        post_release_work()
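# Example invocations, matching the argparse flags defined above:
#
#   python utils/release.py                 # bump the minor version for a release
#   python utils/release.py --patch         # cut a patch release
#   python utils/release.py --post_release  # move back to a .dev0 version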
| 544 |
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _lowerCamelCase :
pass
| 544 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 566 |
'''simple docstring'''
from collections import namedtuple
from_to = namedtuple('from_to', 'from_ to')
METRIC_CONVERSION = {
    'cubicmeter': from_to(1, 1),
    'litre': from_to(0.001, 1_000),
    'kilolitre': from_to(1, 1),
    'gallon': from_to(0.0_0454, 264.172),
    'cubicyard': from_to(0.7_6455, 1.3_0795),
    'cubicfoot': from_to(0.028, 35.3147),
    'cup': from_to(0.0_0023_6588, 4226.75),
}
def convert_volume( value: float , from_type: str , to_type: str ):
    '''simple docstring'''
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            F'''Invalid \'from_type\' value: {from_type!r} Supported values are:\n'''
            + """, """.join(METRIC_CONVERSION ) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            F'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
            + """, """.join(METRIC_CONVERSION ) )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
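# Usage sketch for convert_volume (renamed above from the obfuscated ``lowercase__``):
#
# >>> convert_volume(4, "cubicmeter", "litre")
# 4000.0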
| 566 | 1 |
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class __lowerCamelCase ( DiffusionPipeline ):
    """simple docstring"""
    def __init__( self , value_function: UNet1DModel , unet: UNet1DModel , scheduler: DDPMScheduler , env , ):
        '''simple docstring'''
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]
    def normalize( self , x_in , key ):
        '''simple docstring'''
        return (x_in - self.means[key]) / self.stds[key]
    def de_normalize( self , x_in , key ):
        '''simple docstring'''
        return x_in * self.stds[key] + self.means[key]
    def to_torch( self , x_in ):
        '''simple docstring'''
        if type(x_in ) is dict:
            return {k: self.to_torch(v ) for k, v in x_in.items()}
        elif torch.is_tensor(x_in ):
            return x_in.to(self.unet.device )
        return torch.tensor(x_in , device=self.unet.device )
    def reset_x0( self , x_in , cond , act_dim ):
        '''simple docstring'''
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in
    def run_diffusion( self , x , conditions , n_guide_steps , scale ):
        '''simple docstring'''
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,) , i , device=self.unet.device , dtype=torch.long )
            for _ in range(n_guide_steps ):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0 , 2 , 1 ) , timesteps ).sample
                    grad = torch.autograd.grad([y.sum()] , [x] )[0]
                    posterior_variance = self.scheduler._get_variance(i )
                    model_std = torch.exp(0.5 * posterior_variance )
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x , conditions , self.action_dim )
            prev_x = self.unet(x.permute(0 , 2 , 1 ) , timesteps ).sample.permute(0 , 2 , 1 )
            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x , i , x , predict_epsilon=False )['''prev_sample''']
            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x , conditions , self.action_dim )
            x = self.to_torch(x )
        return x, y
    def __call__( self , obs , batch_size=64 , planning_horizon=32 , n_guide_steps=2 , scale=0.1 ):
        '''simple docstring'''
        # normalize the observations and create batch dimension
        obs = self.normalize(obs , "observations" )
        obs = obs[None].repeat(batch_size , axis=0 )
        conditions = {0: self.to_torch(obs )}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape , device=self.unet.device )
        x = self.reset_x0(x1 , conditions , self.action_dim )
        x = self.to_torch(x )
        # run the diffusion process
        x , y = self.run_diffusion(x , conditions , n_guide_steps , scale )
        # sort output trajectories by value
        sorted_idx = y.argsort(0 , descending=True ).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions , key="actions" )
        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0 , batch_size )
        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
| 718 |
from math import sqrt
def sum_of_divisors( n: int ):
    '''simple docstring'''
    total = 0
    for i in range(1 , int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n
def solution( limit: int = 1_0000 ):
    '''simple docstring'''
    total = sum(
        i
        for i in range(1 , limit )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
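# Worked example: 220 and 284 form the smallest amicable pair, since
# sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220; both therefore satisfy
# sum_of_divisors(sum_of_divisors(n)) == n with sum_of_divisors(n) != n, so solution()
# counts them both toward the total.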
| 601 | 0 |
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines( lines ) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(R'#.*' , '' , line )  # remove comments
        if line:
            filtered_lines.append(line )
    full_str = '\n'.join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode('utf-8' )
    return sha256(full_bytes ).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
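# Usage sketch: resolving the builder module and default kwargs for a data-file
# extension via the tables built above. Shown commented out so importing the module
# stays side-effect free:
#
# module_name, default_builder_kwargs = _EXTENSION_TO_MODULE[".jsonl"]
# print(module_name)              # "json"
# print(_MODULE_TO_EXTENSIONS["csv"])  # [".csv", ".tsv"]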
| 302 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowerCamelCase_ : Optional[int] = logging.get_logger(__name__)
class a__ ( PerceiverImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PerceiverImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 559 | 0 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    '''vocab_size''': len(tokenizer),
    '''scale_attn_by_inverse_layer_idx''': True,
    '''reorder_and_upcast_attn''': True,
}
# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 373 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class lowerCAmelCase_ ( PretrainedConfig ):
    model_type = 'pegasus'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self , vocab_size=50265 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=1024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=0 , scale_embedding=False , pad_token_id=0 , eos_token_id=1 , forced_eos_token_id=1 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
    @property
    def num_attention_heads( self ) -> int:
        """simple docstring"""
        return self.encoder_attention_heads
    @property
    def hidden_size( self ) -> int:
        """simple docstring"""
        return self.d_model
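# Usage sketch (the config class above with its defaults). Shown commented out to
# avoid side effects at import time:
#
# config = lowerCAmelCase_(encoder_layers=6, decoder_layers=6)
# print(config.num_attention_heads)  # 16, via the encoder_attention_heads property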
| 373 | 1 |
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = '''examples/'''
REPLACE_PATTERNS = {
    '''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
    '''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
    '''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
    '''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
REPLACE_FILES = {
    '''init''': '''src/transformers/__init__.py''',
    '''setup''': '''setup.py''',
}
README_FILE = '''README.md'''
def update_version_in_file( fname , version , pattern ):
    with open(fname , 'r' , encoding='utf-8' , newline='\n' ) as f:
        code = f.read()
    re_pattern , replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('VERSION' , version )
    code = re_pattern.sub(replace , code )
    with open(fname , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.write(code )
def update_version_in_examples( version ):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('research_projects' )
        if "legacy" in directories:
            directories.remove('legacy' )
        for fname in fnames:
            if fname.endswith('.py' ):
                update_version_in_file(os.path.join(folder , fname ) , version , pattern='examples' )
def global_version_update( version , patch=False ):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )
def clean_main_ref_in_model_list():
    _start_prompt = '🤗 Transformers currently provides the following architectures'
    _end_prompt = '1. Want to contribute a new model?'
    with open(README_FILE , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith('1.' ):
            lines[index] = lines[index].replace(
                'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , )
        index += 1
    with open(README_FILE , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.writelines(lines )
def get_version():
    with open(REPLACE_FILES['init'] , 'r' ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['init'][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def pre_release_work( patch=False ):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
    else:
        default_version = F"""{default_version.major}.{default_version.minor + 1}.0"""
    # Now let's ask nicely if that's the right one.
    version = input(F"""Which version are you releasing? [{default_version}]""" )
    if len(version ) == 0:
        version = default_version
    print(F"""Updating version to {version}.""" )
    global_version_update(version , patch=patch )
    if not patch:
        print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
        clean_main_ref_in_model_list()
def post_release_work():
    current_version = get_version()
    dev_version = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(F"""Which version are we developing now? [{dev_version}]""" )
    if len(version ) == 0:
        version = dev_version
    print(F"""Updating version to {version}.""" )
    global_version_update(version )
    print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
    parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print('''Nothing to do after a patch :-)''')
    else:
        post_release_work()
| 40 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
    )
    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation='''relu'''))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Step 3 - Flattening
    classifier.add(layers.Flatten())
    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation='''relu'''))
    classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
    # Compiling the CNN
    classifier.compile(
        optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
    )
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        '''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
    )
    test_set = test_datagen.flow_from_directory(
        '''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
    )
    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save('''cnn.h5''')
    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        '''dataset/single_prediction/image.png''', target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    # The sigmoid output is a probability in [0, 1], so threshold at 0.5 rather than
    # comparing against exact 0/1 values.
    if result[0][0] <= 0.5:
        prediction = '''Normal'''
    else:
        prediction = '''Abnormality detected'''
| 40 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/mbart-large-50-one-to-many-mmt""": (
"""https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/mbart-large-50-one-to-many-mmt""": 1024,
}
# fmt: off
_lowercase = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN""", """af_ZA""", """az_AZ""", """bn_IN""", """fa_IR""", """he_IL""", """hr_HR""", """id_ID""", """ka_GE""", """km_KH""", """mk_MK""", """ml_IN""", """mn_MN""", """mr_IN""", """pl_PL""", """ps_AF""", """pt_XX""", """sv_SE""", """sw_KE""", """ta_IN""", """te_IN""", """th_TH""", """tl_XX""", """uk_UA""", """ur_PK""", """xh_ZA""", """gl_ES""", """sl_SI"""]
class MBart50Tokenizer(PreTrainedTokenizer ):
vocab_files_names = VOCAB_FILES_NAMES
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
model_input_names = ["input_ids", "attention_mask"]
prefix_tokens : List[int] = []
suffix_tokens : List[int] = []
def __init__( self , vocab_file , src_lang : Optional[str]=None , tgt_lang : Optional[str]=None , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , )-> None:
"""simple docstring"""
# Mask token behaves like a normal word, i.e. include the space before it
mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
kwargs["additional_special_tokens"] = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=src_lang , tgt_lang=tgt_lang , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(vocab_file ) )
self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCAmelCase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase = 1
UpperCAmelCase = len(self.sp_model )
UpperCAmelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase )
}
UpperCAmelCase = {v: k for k, v in self.lang_code_to_id.items()}
UpperCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCAmelCase = src_lang if src_lang is not None else '''en_XX'''
UpperCAmelCase = self.lang_code_to_id[self._src_lang]
UpperCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def vocab_size( self )-> int:
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def src_lang( self )-> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def src_lang( self , new_src_lang : str )-> None:
"""simple docstring"""
self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self )-> Dict:
"""simple docstring"""
state = self.__dict__.copy()
state["sp_model"] = None
return state
def __setstate__( self , d : Dict )-> None:
"""simple docstring"""
self.__dict__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def get_vocab( self )-> Dict:
"""simple docstring"""
vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _tokenize( self , text : str )-> List[str]:
"""simple docstring"""
return self.sp_model.encode(text , out_type=str )
def _convert_token_to_id( self , token : str )-> int:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
spm_id = self.sp_model.PieceToId(token )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _convert_id_to_token( self , index : int )-> str:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def convert_tokens_to_string( self , tokens : List[str] )-> str:
"""simple docstring"""
current_sub_tokens = []
out_string = ''''''
prev_is_special = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(current_sub_tokens ) + token
prev_is_special = True
current_sub_tokens = []
else:
current_sub_tokens.append(token )
prev_is_special = False
out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None )-> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(save_directory ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
elif not os.path.isfile(self.vocab_file ):
with open(out_vocab_file , '''wb''' ) as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model )
return (out_vocab_file,)
def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False )-> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
prefix_ones = [1] * len(self.prefix_tokens )
suffix_ones = [1] * len(self.suffix_tokens )
if token_ids_1 is None:
return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
if token_ids_1 is None:
return self.prefix_tokens + token_ids_0 + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
def _build_translation_inputs( self , raw_inputs , return_tensors : str , src_lang : Optional[str] , tgt_lang : Optional[str] , **extra_kwargs )-> Optional[Any]:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
self.src_lang = src_lang
inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
inputs["forced_bos_token_id"] = tgt_lang_id
return inputs
def prepare_seq2seq_batch( self , src_texts : List[str] , src_lang : str = "en_XX" , tgt_texts : Optional[List[str]] = None , tgt_lang : str = "ro_RO" , **kwargs , )-> BatchEncoding:
"""simple docstring"""
self.src_lang = src_lang
self.tgt_lang = tgt_lang
return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
def _switch_to_input_mode( self )-> Union[str, Any]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _switch_to_target_mode( self )-> int:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def set_src_lang_special_tokens( self , src_lang : str )-> None:
"""simple docstring"""
self.cur_lang_code_id = self.lang_code_to_id[src_lang]
self.prefix_tokens = [self.cur_lang_code_id]
self.suffix_tokens = [self.eos_token_id]
def set_tgt_lang_special_tokens( self , tgt_lang : str )-> None:
"""simple docstring"""
self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
self.prefix_tokens = [self.cur_lang_code_id]
self.suffix_tokens = [self.eos_token_id]
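# Usage sketch (hedged: assumes the checkpoint named above can be downloaded;
# the sentence is a placeholder, not part of this file):
# tok = MBart50Tokenizer.from_pretrained(
#     "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
# )
# batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# The source language code is prepended via prefix_tokens and </s> appended via suffix_tokens.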
| 707 |
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args ):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student , args ):
'''simple docstring'''
if args.student_type == "roberta":
student.roberta.embeddings.position_embeddings.weight.requires_grad = False
elif args.student_type == "gpt2":
student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student , args ):
'''simple docstring'''
if args.student_type == "roberta":
student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
'''simple docstring'''
parser = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=str , required=True , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=str , required=True , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=str , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=True , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=str , required=True , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=None , type=str , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=True , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=str , required=True , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=float , help='''Temperature applied to the softmax.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=float , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=float , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=float , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=float , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=float , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=float , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=float , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=float , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=float , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=float , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=str , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=int , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=int , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=int , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=float , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=float , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=float , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=float , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=float , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=float , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=str , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=int , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=int , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=int , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=int , default=5_00 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=int , default=40_00 , help='''Checkpoint interval.''' )
args = parser.parse_args()
sanity_checks(args )
# ARGS #
init_gpu_params(args )
set_seed(args )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"""Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite"""
''' it. Use `--force` if you want to overwrite it.''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(args ) , f , indent=4 )
git_log(args.dump_path )
student_config_class , student_model_class , _ = MODEL_CLASSES[args.student_type]
teacher_config_class , teacher_model_class , teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name )
special_tok_ids = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
idx = tokenizer.all_special_tokens.index(tok_symbol )
special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
logger.info(f"""Special tokens {special_tok_ids}""" )
args.special_tok_ids = special_tok_ids
args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
with open(args.data_file , '''rb''' ) as fp:
data = pickle.load(fp )
if args.mlm:
logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , '''rb''' ) as fp:
counts = pickle.load(fp )
token_probs = np.maximum(counts , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
token_probs[idx] = 0.0 # do not predict special tokens
token_probs = torch.from_numpy(token_probs )
else:
token_probs = None
train_lm_seq_dataset = LmSeqsDataset(params=args , data=data )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""" )
stu_architecture_config = student_config_class.from_pretrained(args.student_config )
stu_architecture_config.output_hidden_states = True
if args.student_pretrained_weights is not None:
logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
student = student_model_class.from_pretrained(args.student_pretrained_weights , config=stu_architecture_config )
else:
student = student_model_class(stu_architecture_config )
if args.n_gpu > 0:
student.to(f"""cuda:{args.local_rank}""" )
logger.info('''Student loaded.''' )
# TEACHER #
teacher = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=True )
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""" )
logger.info(f"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(student , args )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(student , args )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
distiller = Distiller(
params=args , dataset=train_lm_seq_dataset , token_probs=token_probs , student=student , teacher=teacher )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
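# Hypothetical invocation (every path and name below is a placeholder, not taken
# from this file); it simply mirrors the flags parsed above:
# python train.py \
#   --student_type distilbert --student_config student_config.json \
#   --teacher_type bert --teacher_name bert-base-uncased \
#   --mlm --alpha_ce 0.5 --alpha_mlm 0.5 --alpha_clm 0.0 \
#   --data_file data/binarized_text.pickle --token_counts data/token_counts.pickle \
#   --dump_path serialization_dir/my_distillation --force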
| 50 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
class MarkupLMConfig(PretrainedConfig ):
model_type = '''markuplm'''
def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , bos_token_id=0 , eos_token_id=2 , max_xpath_tag_unit_embeddings=256 , max_xpath_subs_unit_embeddings=1_024 , tag_pad_id=216 , subs_pad_id=1_001 , xpath_unit_hidden_size=32 , max_depth=50 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
# additional properties
self.max_depth = max_depth
self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
self.tag_pad_id = tag_pad_id
self.subs_pad_id = subs_pad_id
self.xpath_unit_hidden_size = xpath_unit_hidden_size | 219 |
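# Minimal usage sketch (hedged: assumes a transformers install that ships MarkupLM):
# from transformers import MarkupLMModel
# config = MarkupLMConfig()  # defaults mirror the microsoft/markuplm-base checkpoint
# model = MarkupLMModel(config)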
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class RagTokenizer:
def __init__( self , question_encoder , generator ) -> int:
'''simple docstring'''
self.question_encoder = question_encoder
self.generator = generator
self.current_tokenizer = self.question_encoder
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[str]:
'''simple docstring'''
if os.path.isfile(_snake_case ):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(_snake_case , exist_ok=_snake_case )
__a = os.path.join(_snake_case , '''question_encoder_tokenizer''' )
__a = os.path.join(_snake_case , '''generator_tokenizer''' )
self.question_encoder.save_pretrained(_snake_case )
self.generator.save_pretrained(_snake_case )
@classmethod
def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> List[Any]:
'''simple docstring'''
from ..auto.tokenization_auto import AutoTokenizer
config = kwargs.pop('''config''' , None )
if config is None:
config = RagConfig.from_pretrained(pretrained_model_name_or_path )
question_encoder = AutoTokenizer.from_pretrained(
pretrained_model_name_or_path , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
generator = AutoTokenizer.from_pretrained(
pretrained_model_name_or_path , config=config.generator , subfolder='''generator_tokenizer''' )
return cls(question_encoder=question_encoder , generator=generator )
def __call__( self , *_snake_case , **_snake_case ) -> int:
'''simple docstring'''
return self.current_tokenizer(*_snake_case , **_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , *_snake_case , **_snake_case ) -> str:
'''simple docstring'''
return self.generator.batch_decode(*_snake_case , **_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , *_snake_case , **_snake_case ) -> Any:
'''simple docstring'''
return self.generator.decode(*_snake_case , **_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
__a = self.question_encoder
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
__a = self.generator
def prepare_seq2seq_batch( self , src_texts , tgt_texts = None , max_length = None , max_target_length = None , padding = "longest" , return_tensors = None , truncation = True , **kwargs , ) -> BatchEncoding:
'''simple docstring'''
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
'''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' , FutureWarning , )
if max_length is None:
max_length = self.current_tokenizer.model_max_length
model_inputs = self(
src_texts , add_special_tokens=True , return_tensors=return_tensors , max_length=max_length , padding=padding , truncation=truncation , **kwargs , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
max_target_length = self.current_tokenizer.model_max_length
labels = self(
text_target=tgt_texts , add_special_tokens=True , return_tensors=return_tensors , padding=padding , max_length=max_target_length , truncation=truncation , **kwargs , )
model_inputs["labels"] = labels['''input_ids''']
return model_inputs | 219 | 1 |
def _snake_case ( __snake_case = 1_0_0 ) -> int:
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = set()
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : List[str] = n + 1 # maximum limit
for a in range(2 , _snake_case ):
for b in range(2 , _snake_case ):
UpperCAmelCase_ : Optional[int] = a**b # calculates the current power
collect_powers.add(_snake_case ) # adds the result to the set
return len(_snake_case )
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
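# Worked example (Project Euler 29): for 2 <= a, b <= 5 the 16 products a**b
# collapse to 15 distinct values because 2**4 == 4**2 == 16, so solution(5) == 15.
assert solution(5) == 15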
| 711 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC ):
"""simple docstring"""
def __init__( self ,path_or_paths = None ,split = None ,features = None ,cache_dir = None ,keep_in_memory = False ,streaming = False ,num_proc = None ,**kwargs ,):
"""simple docstring"""
self.path_or_paths = path_or_paths
self.split = split if split or isinstance(path_or_paths ,dict) else "train"
self.features = features
self.cache_dir = cache_dir
self.keep_in_memory = keep_in_memory
self.streaming = streaming
self.num_proc = num_proc
self.kwargs = kwargs
@abstractmethod
def read( self ):
"""simple docstring"""
pass
class AbstractDatasetInputStream(ABC ):
"""simple docstring"""
def __init__( self ,features = None ,cache_dir = None ,keep_in_memory = False ,streaming = False ,num_proc = None ,**kwargs ,):
"""simple docstring"""
self.features = features
self.cache_dir = cache_dir
self.keep_in_memory = keep_in_memory
self.streaming = streaming
self.num_proc = num_proc
self.kwargs = kwargs
@abstractmethod
def read( self ):
"""simple docstring"""
pass
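# Illustrative subclass (hypothetical, not part of the original module): a concrete
# reader only stores its arguments and implements read().
class InMemoryListReader(AbstractDatasetReader):
    def read(self):
        return Dataset.from_dict({"text": list(self.path_or_paths)})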
| 455 | 0 |
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> np.array:
lowerCamelCase_ = f'''{sampling_rate}'''
lowerCamelCase_ = '1'
lowerCamelCase_ = 'f32le'
lowerCamelCase_ = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(ffmpeg_command ,stdin=subprocess.PIPE ,stdout=subprocess.PIPE ) as ffmpeg_process:
output_stream = ffmpeg_process.communicate(bpayload )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
out_bytes = output_stream[0]
audio = np.frombuffer(out_bytes ,np.float32 )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = "f32le" ,) -> Union[str, Any]:
lowerCamelCase_ = f'''{sampling_rate}'''
lowerCamelCase_ = '1'
if format_for_conversion == "s16le":
lowerCamelCase_ = 2
elif format_for_conversion == "f32le":
lowerCamelCase_ = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
lowerCamelCase_ = platform.system()
if system == "Linux":
lowerCamelCase_ = 'alsa'
lowerCamelCase_ = 'default'
elif system == "Darwin":
lowerCamelCase_ = 'avfoundation'
lowerCamelCase_ = ':0'
elif system == "Windows":
lowerCamelCase_ = 'dshow'
lowerCamelCase_ = 'default'
lowerCamelCase_ = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
iterator = _ffmpeg_stream(ffmpeg_command ,chunk_len )
for item in iterator:
yield item
def ffmpeg_microphone_live( sampling_rate ,chunk_length_s ,stream_chunk_s = None ,stride_length_s = None ,format_for_conversion = "f32le" ,) -> Any:
if stream_chunk_s is not None:
chunk_s = stream_chunk_s
else:
chunk_s = chunk_length_s
microphone = ffmpeg_microphone(sampling_rate ,chunk_s ,format_for_conversion=format_for_conversion )
if format_for_conversion == "s16le":
lowerCamelCase_ = np.intaa
lowerCamelCase_ = 2
elif format_for_conversion == "f32le":
lowerCamelCase_ = np.floataa
lowerCamelCase_ = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
lowerCamelCase_ = chunk_length_s / 6
lowerCamelCase_ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__UpperCamelCase ,(int, float) ):
lowerCamelCase_ = [stride_length_s, stride_length_s]
lowerCamelCase_ = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
lowerCamelCase_ = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
lowerCamelCase_ = datetime.datetime.now()
lowerCamelCase_ = datetime.timedelta(seconds=__UpperCamelCase )
for item in chunk_bytes_iter(__UpperCamelCase ,__UpperCamelCase ,stride=(stride_left, stride_right) ,stream=__UpperCamelCase ):
# Put everything back in numpy scale
item['raw'] = np.frombuffer(item['raw'] ,dtype=dtype )
item['stride'] = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
item['sampling_rate'] = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def chunk_bytes_iter( iterator ,chunk_len ,stride ,stream = False ) -> Optional[Any]:
acc = b''
stride_left ,stride_right = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
_stride_left = 0
for raw in iterator:
acc += raw
if stream and len(acc ) < chunk_len:
stride = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(__UpperCamelCase ) >= chunk_len:
# We are flushing the accumulator
stride = (_stride_left, stride_right)
item = {'raw': acc[:chunk_len], 'stride': stride}
if stream:
item['partial'] = False
yield item
_stride_left = stride_left
acc = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(__UpperCamelCase ) > stride_left:
lowerCamelCase_ = {'raw': acc, 'stride': (_stride_left, 0)}
if stream:
lowerCamelCase_ = False
yield item
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> Optional[int]:
lowerCamelCase_ = 2**24 # 16Mo
try:
with subprocess.Popen(__UpperCamelCase ,stdout=subprocess.PIPE ,bufsize=__UpperCamelCase ) as ffmpeg_process:
while True:
lowerCamelCase_ = ffmpeg_process.stdout.read(__UpperCamelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
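# Striding behavior of chunk_bytes_iter, traced on a toy byte string (values are
# illustrative only): 8 bytes, windows of 4, one byte of stride on each side.
# for item in chunk_bytes_iter(iter([b"abcdefgh"]), chunk_len=4, stride=(1, 1)):
#     print(item["raw"], item["stride"])
# -> b'abcd' (0, 1)   first window has no left stride
# -> b'cdef' (1, 1)
# -> b'efgh' (1, 1)
# -> b'gh'   (1, 0)   trailing remainder keeps only the left stride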
| 42 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = inspect.getfile(accelerate.test_utils )
lowerCamelCase_ = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
lowerCamelCase_ = test_metrics
@require_cpu
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
debug_launcher(self.test_metrics.main )
@require_single_gpu
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
self.test_metrics.main()
@require_multi_gpu
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
print(f'''Found {torch.cuda.device_count()} devices.''' )
lowerCamelCase_ = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
| 42 | 1 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
__UpperCAmelCase = """\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
__UpperCAmelCase = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper \"Evaluating Large Language Models Trained on Code\"
(https://arxiv.org/abs/2107.03374).
"""
__UpperCAmelCase = """
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric(\"code_eval\")
>>> test_cases = [\"assert add(2,3)==5\"]
>>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{'pass@1': 0.5, 'pass@2': 1.0}
"""
__UpperCAmelCase = """
################################################################################
!!!WARNING!!!
################################################################################
The \"code_eval\" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper \"Evaluating Large
Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this
with:
>>> import os
>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"
################################################################################\
"""
__UpperCAmelCase = """The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the \"Software\"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric ):
"""simple docstring"""
def _info( self ) -> List[str]:
"""simple docstring"""
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
def _compute( self , predictions , references , k=[1, 10, 1_00] , num_workers=4 , timeout=3.0 ) -> Tuple:
"""simple docstring"""
if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("""This metric is currently not supported on Windows.""" )
with ThreadPoolExecutor(max_workers=num_workers ) as executor:
futures = []
completion_id = Counter()
n_samples = 0
results = defaultdict(list )
for task_id, (candidates, test_case) in enumerate(zip(predictions , references ) ):
for candidate in candidates:
test_program = candidate + """\n""" + test_case
args = (test_program, timeout, task_id, completion_id[task_id])
future = executor.submit(check_correctness , *args )
futures.append(future )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(futures ):
result = future.result()
results[result["task_id"]].append((result["""completion_id"""], result) )
total , correct = [], []
for result in results.values():
result.sort()
passed = [r[1]["""passed"""] for r in result]
total.append(len(passed ) )
correct.append(sum(passed ) )
total = np.array(total )
correct = np.array(correct )
ks = k
pass_at_k = {f'''pass@{k}''': estimate_pass_at_k(total , correct , k ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def snake_case_ (__A : Any , __A : Optional[Any] , __A : Optional[Any] ) -> int:
def estimator(__A : int , __A : int , __A : int ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(__A , __A ):
__lowerCAmelCase : List[Any] = itertools.repeat(__A , len(__A ) )
else:
assert len(__A ) == len(__A )
__lowerCAmelCase : str = iter(__A )
return np.array([estimator(int(__A ) , int(__A ) , __A ) for n, c in zip(__A , __A )] )
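# The estimator above is the unbiased pass@k from the Codex paper:
# pass@k = 1 - C(n - c, k) / C(n, k), computed in product form for numerical
# stability. Quick check (illustrative): n=5 samples, c=2 correct, k=3 gives 0.9.
from math import comb
assert abs(1 - comb(5 - 2, 3) / comb(5, 3) - 0.9) < 1e-12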
| 218 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class VisualQuestionAnsweringPipeline(Pipeline ):
"""simple docstring"""
def __init__( self , *args , **kwargs ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(*args , **kwargs )
self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING )
def _sanitize_parameters( self , top_k=None , padding=None , truncation=None , **kwargs ) -> Tuple:
"""simple docstring"""
preprocess_params , postprocess_params = {}, {}
if padding is not None:
preprocess_params["padding"] = padding
if truncation is not None:
preprocess_params["truncation"] = truncation
if top_k is not None:
postprocess_params["top_k"] = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Optional[int] , lowerCAmelCase : Union["Image.Image", str] , lowerCAmelCase : str = None , **lowerCAmelCase : List[str] ) -> Any:
"""simple docstring"""
if isinstance(lowerCAmelCase , (Image.Image, str) ) and isinstance(lowerCAmelCase , lowerCAmelCase ):
__lowerCAmelCase : Any = {"""image""": image, """question""": question}
else:
__lowerCAmelCase : List[str] = image
__lowerCAmelCase : str = super().__call__(lowerCAmelCase , **lowerCAmelCase )
return results
def preprocess( self , inputs , padding=False , truncation=False ) -> Any:
"""simple docstring"""
image = load_image(inputs["""image"""] )
model_inputs = self.tokenizer(
inputs["""question"""] , return_tensors=self.framework , padding=padding , truncation=truncation )
image_features = self.image_processor(images=image , return_tensors=self.framework )
model_inputs.update(image_features )
return model_inputs
def _forward( self , model_inputs ) -> Union[str, Any]:
"""simple docstring"""
model_outputs = self.model(**model_inputs )
return model_outputs
def postprocess( self , model_outputs , top_k=5 ) -> Union[str, Any]:
"""simple docstring"""
if top_k > self.model.config.num_labels:
top_k = self.model.config.num_labels
if self.framework == "pt":
probs = model_outputs.logits.sigmoid()[0]
scores , ids = probs.topk(top_k )
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
scores = scores.tolist()
ids = ids.tolist()
return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 218 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNetaDOutput(BaseOutput ):
sample : torch.FloatTensor
class UNetaDModel(ModelMixin , ConfigMixin ):
@register_to_config
def __init__( self ,sample_size = 65536 ,sample_rate = None ,in_channels = 2 ,out_channels = 2 ,extra_in_channels = 0 ,time_embedding_type = "fourier" ,flip_sin_to_cos = True ,use_timestep_embedding = False ,freq_shift = 0.0 ,down_block_types = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") ,up_block_types = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") ,mid_block_type = "UNetMidBlock1D" ,out_block_type = None ,block_out_channels = (32, 32, 64) ,act_fn = None ,norm_num_groups = 8 ,layers_per_block = 1 ,downsample_each_block = False ,):
'''simple docstring'''
super().__init__()
self.sample_size = sample_size
# time
if time_embedding_type == "fourier":
self.time_proj = GaussianFourierProjection(
embedding_size=8 ,set_W_to_weight=False ,log=False ,flip_sin_to_cos=flip_sin_to_cos )
timestep_input_dim = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
self.time_proj = Timesteps(
block_out_channels[0] ,flip_sin_to_cos=flip_sin_to_cos ,downscale_freq_shift=freq_shift )
timestep_input_dim = block_out_channels[0]
if use_timestep_embedding:
time_embed_dim = block_out_channels[0] * 4
self.time_mlp = TimestepEmbedding(
in_channels=timestep_input_dim ,time_embed_dim=time_embed_dim ,act_fn=act_fn ,out_dim=block_out_channels[0] ,)
self.down_blocks = nn.ModuleList([] )
self.mid_block = None
self.up_blocks = nn.ModuleList([] )
self.out_block = None
# down
output_channel = in_channels
for i, down_block_type in enumerate(down_block_types ):
input_channel = output_channel
output_channel = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
is_final_block = i == len(block_out_channels ) - 1
down_block = get_down_block(
down_block_type ,num_layers=layers_per_block ,in_channels=input_channel ,out_channels=output_channel ,temb_channels=block_out_channels[0] ,add_downsample=not is_final_block or downsample_each_block ,)
self.down_blocks.append(down_block )
# mid
self.mid_block = get_mid_block(
mid_block_type ,in_channels=block_out_channels[-1] ,mid_channels=block_out_channels[-1] ,out_channels=block_out_channels[-1] ,embed_dim=block_out_channels[0] ,num_layers=layers_per_block ,add_downsample=downsample_each_block ,)
# up
reversed_block_out_channels = list(reversed(block_out_channels ) )
output_channel = reversed_block_out_channels[0]
if out_block_type is None:
final_upsample_channels = out_channels
else:
final_upsample_channels = block_out_channels[0]
for i, up_block_type in enumerate(up_block_types ):
prev_output_channel = output_channel
output_channel = (
reversed_block_out_channels[i + 1] if i < len(up_block_types ) - 1 else final_upsample_channels
)
is_final_block = i == len(block_out_channels ) - 1
up_block = get_up_block(
up_block_type ,num_layers=layers_per_block ,in_channels=prev_output_channel ,out_channels=output_channel ,temb_channels=block_out_channels[0] ,add_upsample=not is_final_block ,)
self.up_blocks.append(up_block )
prev_output_channel = output_channel
# out
num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 ,32 )
self.out_block = get_out_block(
out_block_type=out_block_type ,num_groups_out=num_groups_out ,embed_dim=block_out_channels[0] ,out_channels=out_channels ,act_fn=act_fn ,fc_dim=block_out_channels[-1] // 4 ,)
def forward( self ,sample ,timestep ,return_dict = True ,):
'''simple docstring'''
timesteps = timestep
if not torch.is_tensor(timesteps ):
timesteps = torch.tensor([timesteps] ,dtype=torch.long ,device=sample.device )
elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
timesteps = timesteps[None].to(sample.device )
timestep_embed = self.time_proj(timesteps )
if self.config.use_timestep_embedding:
timestep_embed = self.time_mlp(timestep_embed )
else:
timestep_embed = timestep_embed[..., None]
timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
down_block_res_samples = ()
for downsample_block in self.down_blocks:
sample , res_samples = downsample_block(hidden_states=sample ,temb=timestep_embed )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
sample = self.mid_block(sample ,timestep_embed )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
res_samples = down_block_res_samples[-1:]
down_block_res_samples = down_block_res_samples[:-1]
sample = upsample_block(sample ,res_hidden_states_tuple=res_samples ,temb=timestep_embed )
# 5. post-process
if self.out_block:
sample = self.out_block(sample ,timestep_embed )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=sample )
| 336 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
_A = filter(lambda _SCREAMING_SNAKE_CASE : p.requires_grad , model.parameters() )
_A = sum([np.prod(p.size() ) for p in model_parameters] )
return params
logger = logging.getLogger(__name__)
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
if metric == "rouge2":
_A = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
_A = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
_A = '{val_avg_em:.4f}-{step_count}'
elif metric == "loss":
_A = '{val_avg_loss:.4f}-{step_count}'
else:
raise NotImplementedError(
F"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
' function.' )
_A = ModelCheckpoint(
dirpath=_SCREAMING_SNAKE_CASE , filename=_SCREAMING_SNAKE_CASE , monitor=F"val_{metric}" , mode='max' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
return EarlyStopping(
monitor=F"val_{metric}" , mode='min' if 'loss' in metric else 'max' , patience=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , )
class Seq2SeqLoggingCallback(pl.Callback ):
'''simple docstring'''
def on_batch_end( self , trainer , pl_module ):
lrs = {F"lr_group_{i}": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(lrs )
@rank_zero_only
def _write_logs( self , trainer , pl_module , type_path , save_generations=True ):
logger.info(F"***** {type_path} results at step {trainer.global_step:05d} *****" )
metrics = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
od = Path(pl_module.hparams.output_dir )
if type_path == "test":
results_file = od / 'test_results.txt'
generations_file = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
results_file = od / F"{type_path}_results/{trainer.global_step:05d}.txt"
generations_file = od / F"{type_path}_generations/{trainer.global_step:05d}.txt"
results_file.parent.mkdir(exist_ok=True )
generations_file.parent.mkdir(exist_ok=True )
with open(results_file , 'a+' ) as writer:
for key in sorted(metrics ):
if key in ["log", "progress_bar", "preds"]:
continue
val = metrics[key]
if isinstance(val , torch.Tensor ):
val = val.item()
msg = F"{key}: {val:.6f}\n"
writer.write(msg )
if not save_generations:
return
if "preds" in metrics:
content = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(content )
@rank_zero_only
def on_train_start( self , trainer , pl_module ):
try:
npars = pl_module.model.model.num_parameters()
except AttributeError:
npars = pl_module.model.num_parameters()
n_trainable_pars = count_trainable_parameters(pl_module )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )
@rank_zero_only
def on_test_end( self , trainer , pl_module ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(trainer , pl_module , 'test' )
@rank_zero_only
def on_validation_end( self , trainer , pl_module ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 27 | 0 |
from math import factorial, pi
def _lowercase ( a_ : Union[str, Any] ,a_ : Tuple = 3_0 ) -> Any:
if not isinstance(SCREAMING_SNAKE_CASE_ ,(int, float) ):
raise ValueError('maclaurin_sin() requires either an int or float for theta' )
if not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) or accuracy <= 0:
raise ValueError('maclaurin_sin() requires a positive int for accuracy' )
__magic_name__ = float(SCREAMING_SNAKE_CASE_ )
__magic_name__ = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(SCREAMING_SNAKE_CASE_ ) )
def _lowercase ( a_ : List[Any] ,a_ : Tuple = 3_0 ) -> str:
if not isinstance(SCREAMING_SNAKE_CASE_ ,(int, float) ):
raise ValueError('maclaurin_cos() requires either an int or float for theta' )
if not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) or accuracy <= 0:
raise ValueError('maclaurin_cos() requires a positive int for accuracy' )
__magic_name__ = float(SCREAMING_SNAKE_CASE_ )
__magic_name__ = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(SCREAMING_SNAKE_CASE_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
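# Sanity check (illustrative, not in the original file): after range reduction the
# 30-term truncations should agree with the standard library to high precision.
from math import cos, sin
assert abs(maclaurin_sin(10) - sin(10)) < 1e-10
assert abs(maclaurin_cos(-5) - cos(-5)) < 1e-10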
| 715 |
import requests
from bs4 import BeautifulSoup
def world_covidaa_stats(url: str = "https://www.worldometers.info/coronavirus" ) -> dict:
'''simple docstring'''
soup = BeautifulSoup(requests.get(url ).text ,'html.parser' )
keys = soup.findAll('h1' )
values = soup.findAll('div' ,{'class': 'maincounter-number'} )
keys += soup.findAll('span' ,{'class': 'panel-title'} )
values += soup.findAll('div' ,{'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(keys ,values )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
print(f'''{key}\n{value}\n''')
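    # Offline illustration (an addition with dummy figures, not part of the original
    # script): the same parsing pattern applied to a static HTML snippet, so the
    # scraping logic can be exercised without a network call.
    sample_html = (
        "<h1>Coronavirus Cases:</h1>"
        "<div class='maincounter-number'><span>1,234,567</span></div>"
    )
    sample_soup = BeautifulSoup(sample_html, "html.parser")
    sample_keys = sample_soup.findAll("h1")
    sample_values = sample_soup.findAll("div", {"class": "maincounter-number"})
    print({k.text.strip(): v.text.strip() for k, v in zip(sample_keys, sample_values)})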
| 184 | 0 |
def bead_sort(sequence: list) -> list:
    """Sort a list of non-negative integers with the bead (gravity) sort algorithm."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
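    # Added property check (not in the original): bead sort should agree with the
    # built-in sorted() on random non-negative input.
    import random

    data = [random.randint(0, 100) for _ in range(50)]
    assert bead_sort(data.copy()) == sorted(data)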
| 592 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 583 | 0 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self
    def loader_batch_item(self):
        """Return the current item to use in a pipeline when we're unrolling a batch."""
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # like batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result
    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # Subiterator None means we haven't started a `preprocess` iterator, so start it.
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.

        # This iterator accumulates items (possibly while unbatching) until it
        # hits `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    def __init__(self, dataset, key):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset, key1, key2):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 707 |
"""simple docstring"""
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/blenderbot_small-90M""": 5_12,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" BlenderbotSmall tokenizer (backed by HuggingFace's tokenizers library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 518 | 0 |
"""simple docstring"""
def __snake_case ( SCREAMING_SNAKE_CASE: int ):
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or number < 0:
raise ValueError('Input must be a non-negative integer' )
_lowerCAmelCase = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
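    # Added cross-check (not in the original): Kernighan's trick should match the
    # straightforward bin(n).count("1") for a range of inputs.
    for n in range(256):
        assert get_set_bits_count(n) == bin(n).count("1")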
| 580 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7_592, 1_010, 2_003, 2_026, 3_899, 10_140, 1_029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 580 | 1 |
from __future__ import annotations
class BoyerMooreSearch:
    """Locate a pattern in text using the Boyer-Moore bad-character heuristic."""

    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch for the window at
        `current_pos`, or -1 if the whole pattern matches there."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
UpperCamelCase__ = """ABAABA"""
UpperCamelCase__ = """AB"""
UpperCamelCase__ = BoyerMooreSearch(text, pattern)
UpperCamelCase__ = bms.bad_character_heuristic()
if len(positions) == 0:
print("""No match found""")
else:
print("""Pattern found in following positions: """)
print(positions)
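
# Added sanity check (not part of the original demo): the heuristic's matches
# should agree with a naive scan using str.startswith.
naive = [i for i in range(len(text) - len(pattern) + 1) if text.startswith(pattern, i)]
assert positions == naive, (positions, naive)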
| 700 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 486 | 0 |
"""simple docstring"""
from statistics import mean, stdev
def _snake_case ( snake_case__ : list , snake_case__ : int = 3 ):
A = min(snake_case__ )
A = max(snake_case__ )
# normalize data
return [round((x - x_min) / (x_max - x_min) , snake_case__ ) for x in data]
def _snake_case ( snake_case__ : list , snake_case__ : int = 3 ):
A = mean(snake_case__ )
A = stdev(snake_case__ )
# standardize data
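
if __name__ == "__main__":
    # Added usage example (not in the original file).
    data = [2, 4, 6, 8, 10]
    print(normalization(data))    # [0.0, 0.25, 0.5, 0.75, 1.0]
    print(standardization(data))  # [-1.265, -0.632, 0.0, 0.632, 1.265]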
 | 91 |
"""simple docstring"""
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly pytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
 | 91 | 1 |
def valid_coloring(neighbours: list, colored_vertices: list, color: int) -> bool:
    """Check whether `color` can be assigned: no already-colored neighbour may share it."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list, max_colors: int, colored_vertices: list, index: int) -> bool:
    """Recursively try to color vertex `index` and all following vertices."""
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list, max_colors: int) -> list:
    """Return a valid coloring with at most `max_colors` colors, or [] if impossible."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
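
# Added usage example (not part of the original file): a triangle with a pendant
# vertex, given as an adjacency matrix, is 3-colorable but not 2-colorable.
if __name__ == "__main__":
    graph = [
        [0, 1, 1, 0],
        [1, 0, 1, 0],
        [1, 1, 0, 1],
        [0, 0, 1, 0],
    ]
    print(color(graph, 3))  # e.g. [0, 1, 2, 0]
    print(color(graph, 2))  # []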
| 489 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    def put(self, value):
        """Function that is called by `.generate()` to push new tokens"""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation"""
        raise NotImplementedError()
class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True
    def put(self, value):
        """Receives tokens, decodes them, and prints them to stdout as soon as they form entire words."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)
    def end(self):
        """Flushes any remaining cache and prints a newline to stdout."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)
    def _is_chinese_char(self, cp):
        """Checks whether `cp` is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True

        return False
class TextIteratorStreamer(TextStreamer):
    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
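
# Hedged usage sketch (an addition, not part of the original module): stream
# tokens from `generate` as they are produced. The model name below is an
# illustrative assumption.
#
# from threading import Thread
# from transformers import AutoModelForCausalLM, AutoTokenizer
#
# tok = AutoTokenizer.from_pretrained("gpt2")
# model = AutoModelForCausalLM.from_pretrained("gpt2")
# inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
# streamer = TextIteratorStreamer(tok)
# thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20))
# thread.start()
# for new_text in streamer:
#     print(new_text, end="")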
| 489 | 1 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """
    Utility class for CLIP embeddings: combines the image and text embeddings into a
    format usable by the decoder.
    """

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
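
# Added shape check (a minimal sketch with assumed small dimensions, not part of
# the original file): instantiate the projection module and verify output shapes.
if __name__ == "__main__":
    proj = UnCLIPTextProjModel(
        clip_extra_context_tokens=4,
        clip_embeddings_dim=32,
        time_embed_dim=64,
        cross_attention_dim=48,
    )
    hidden, time_emb = proj(
        image_embeddings=torch.randn(2, 32),
        prompt_embeds=torch.randn(2, 32),
        text_encoder_hidden_states=torch.randn(2, 7, 32),
        do_classifier_free_guidance=False,
    )
    print(hidden.shape, time_emb.shape)  # torch.Size([2, 11, 48]) torch.Size([2, 64])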
| 274 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = 'Usage of script: script_name <size_of_canvas:int>'

choice = [0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas(size: int) -> list:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list) -> list:
    """Run one generation of the rules of the game over every point of the canvas."""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas = current_canvas.tolist()
    return return_canvas
def __judge_point(pt: bool, neighbours: list) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(['w', 'k'])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
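
# Added headless example (not part of the original script): evolve a small random
# canvas for one generation without matplotlib and return the live-cell count.
def demo_one_generation(size: int = 10) -> int:
    board = create_canvas(size)
    seed(board)
    board = run(board)
    return sum(cell for row in board for cell in row)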
| 669 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
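
# Illustrative launch note (an addition; the script filename is an assumption):
#   accelerate launch multi_process_metrics.py --mixed_precision fp16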
| 711 |
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list) -> int:
    """Plain recursion (exponential time): track the best square whose top-left
    corner is at (row, col)."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list) -> int:
    """Same recursion, memoized in `dp_array` so each cell is solved once."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list) -> int:
    """Iterative DP over a (rows+1) x (cols+1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list) -> int:
    """Bottom-up DP keeping only two rows of the table."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0

        # Copy (not alias) so the next pass reads this row's finished values.
        next_row = current_row[:]
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
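    # Added consistency check (not in the original): all four variants should agree.
    sample = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
    results = {
        largest_square_area_in_matrix_top_down_approach(3, 3, sample),
        largest_square_area_in_matrix_top_down_approach_with_dp(3, 3, sample),
        largest_square_area_in_matrix_bottom_up(3, 3, sample),
        largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, sample),
    }
    assert results == {2}, results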
| 653 | 0 |
import math


def malus_law(initial_intensity: float, angle: float) -> float:
    """Compute the intensity transmitted through a polarizer (Malus's law)."""
    if initial_intensity < 0:
        # handling of negative values of initial intensity
        raise ValueError("The value of intensity cannot be negative")
    if angle < 0 or angle > 360:
        # handling of values out of allowed range
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
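    # Added numeric example (not in the original): at 60 degrees the transmitted
    # intensity is cos^2(60 deg) = 1/4 of the incident intensity.
    print(malus_law(100, 60))  # ~25.0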
| 75 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCAmelCase__ =logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def _a ( ) -> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_mae''' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if '''validation''' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = ds['''train'''].train_test_split(data_args.train_val_split )
        ds['''train'''] = split['''train''']
        ds['''validation'''] = split['''test''']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        '''cache_dir''': model_args.cache_dir,
        '''revision''': model_args.model_revision,
        '''use_auth_token''': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name , **config_kwargs )
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = ViTMAEConfig()
        logger.warning('''You are instantiating a new config instance from scratch.''' )
    if model_args.config_overrides is not None:
        logger.info(f"""Overriding config: {model_args.config_overrides}""" )
        config.update_from_string(model_args.config_overrides )
        logger.info(f"""New config: {config}""" )
# adapt config
config.update(
{
'''mask_ratio''': model_args.mask_ratio,
'''norm_pix_loss''': model_args.norm_pix_loss,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs )
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        image_processor = ViTImageProcessor()
# create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('''Training new model from scratch''' )
        model = ViTMAEForPreTraining(config )
    if training_args.do_train:
        column_names = ds['''train'''].column_names
    else:
        column_names = ds['''validation'''].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = '''image'''
    elif "img" in column_names:
        image_column_name = '''img'''
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
__SCREAMING_SNAKE_CASE = image_processor.size['''shortest_edge''']
else:
__SCREAMING_SNAKE_CASE = (image_processor.size['''height'''], image_processor.size['''width'''])
__SCREAMING_SNAKE_CASE = Compose(
[
Lambda(lambda UpperCAmelCase__ : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(UpperCAmelCase__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = [transforms(UpperCAmelCase__ ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
        if data_args.max_train_samples is not None:
            ds['''train'''] = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(preprocess_images )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
        if data_args.max_eval_samples is not None:
            ds['''validation'''] = (
                ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images )
# Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
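    # Worked example (illustrative numbers): with per-device batch 16, gradient
    # accumulation 2 and 8 processes, total_train_batch_size = 16 * 2 * 8 = 256, so
    # the absolute learning rate equals the base learning rate (1e-3 * 256 / 256).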
# Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('''eval''' , metrics )
        trainer.save_metrics('''eval''' , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        '''tasks''': '''masked-auto-encoding''',
        '''dataset''': data_args.dataset_name,
        '''tags''': ['''masked-auto-encoding'''],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
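# Illustrative invocation (the script name and output path are hypothetical):
#   python run_mae.py --dataset_name cifar10 --do_train --do_eval \
#       --base_learning_rate 1e-3 --output_dir /tmp/mae-cifar10 --overwrite_output_dir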
if __name__ == "__main__":
main()
| 482 | 0 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset ):
    def __init__(self, params, data ):
        self.params = params
        self.token_ids = np.array(data )
        self.lengths = np.array([len(t ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
    def __getitem__(self, index ):
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
return len(self.lengths )
    def check(self ):
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
    def remove_long_sequences(self ):
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(F"""Splitting {sum(indices )} too long sequences.""" )
        def divide_chunks(l, n ):
            return [l[i : i + n] for i in range(0 , len(l ) , n )]
        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]
        for seq_, len_ in zip(self.token_ids , self.lengths ):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_ )
                new_lengths.append(len_ )
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_ , max_len - 2 ):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s , 0 , cls_id )
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s , len(sub_s ) , sep_id )
                    assert len(sub_s ) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s )
                new_tok_ids.extend(sub_seqs )
                new_lengths.extend([len(l ) for l in sub_seqs] )
        self.token_ids = np.array(new_tok_ids )
        self.lengths = np.array(new_lengths )
    def remove_empty_sequences(self ):
        init_size = len(self )
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self )
        logger.info(F"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" )
    def remove_unknown_sequences(self ):
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
            init_size = len(self )
            unk_occs = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
            indices = (unk_occs / self.lengths) < 0.5
            self.token_ids = self.token_ids[indices]
            self.lengths = self.lengths[indices]
            new_size = len(self )
            logger.info(F"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" )
    def print_statistics(self ):
if not self.params.is_master:
return
logger.info(F"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
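    # Illustrative padding example for batch_sequences below: sequences of lengths 3
    # and 5 with pad_idx=0 become
    #   [[7, 8, 9, 0, 0],
    #    [1, 2, 3, 4, 5]]
    # alongside the lengths tensor [3, 5], from which attention masks can be rebuilt.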
    def batch_sequences(self, batch ):
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids ) == len(lengths )
        # Max for paddings
        max_seq_len_ = max(lengths )
        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int ) ) + [pad_idx] * (max_seq_len_ - len(t )) for t in token_ids]
        assert len(tk_ ) == len(token_ids )
        assert all(len(t ) == max_seq_len_ for t in tk_ )
        tk_t = torch.tensor(tk_ )  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths )  # (bs)
        return tk_t, lg_t
 | 370 | 0 |
"""simple docstring"""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def get_maskformer_config(model_name ):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"] )
    config = MaskFormerConfig(backbone_config=backbone_config )
    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ), "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    return config
def create_rename_keys(config ):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3, 0, -1 ), range(0, 3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
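# Illustrative sketch (hypothetical helper, not used by the conversion) showing the
# rename machinery on a toy state dict:
def _demo_rename_key():
    toy = {"backbone.patch_embed.norm.weight": 1}
    rename_key(toy, "backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight")
    return toy  # the old key is removed and its value re-filed under the new key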
def rename_key(dct, old, new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
            in_proj_bias = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"""] = in_proj_weight[:dim, :]
            state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"""] = in_proj_bias[:dim]
            state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"""] = in_proj_weight[dim : dim * 2, :]
            state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"""] = in_proj_bias[dim : dim * 2]
            state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"""] = in_proj_weight[-dim :, :]
            state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"""] = in_proj_bias[-dim :]
            # fmt: on
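# Illustrative sketch of the slicing above: the fused qkv matrix has shape
# (3*dim, dim), and rows [:dim], [dim:2*dim] and [-dim:] hold the query, key and
# value projections respectively (the helper name is hypothetical):
def _demo_qkv_slices(dim=4):
    in_proj_weight = torch.zeros(3 * dim, dim)
    q, k, v = in_proj_weight[:dim, :], in_proj_weight[dim : dim * 2, :], in_proj_weight[-dim:, :]
    return q.shape, k.shape, v.shape  # three (4, 4) blocks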
def read_in_decoder_q_k_v(state_dict, config ):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"""] = in_proj_weight[: hidden_size, :]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"""] = in_proj_bias[:hidden_size]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"""] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"""] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"""] = in_proj_weight[-hidden_size :, :]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"""] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"""] = in_proj_weight[: hidden_size, :]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"""] = in_proj_bias[:hidden_size]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"""] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"""] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"""] = in_proj_weight[-hidden_size :, :]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"""] = in_proj_bias[-hidden_size :]
    # fmt: on
def prepare_img() -> torch.Tensor:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub = False ):
    config = get_maskformer_config(model_name )
    # load original state_dict
    with open(checkpoint_path , "rb" ) as f:
        data = pickle.load(f )
    state_dict = data["model"]
    # for name, param in state_dict.items():
    # print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value )
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config )
    model.eval()
    for name, param in model.named_parameters():
        print(name , param.shape )
    missing_keys, unexpected_keys = model.load_state_dict(state_dict , strict=False )
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys ) == 0, F"""Unexpected keys: {unexpected_keys}"""
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index , reduce_labels=reduce_labels )
    inputs = image_processor(image , return_tensors="pt" )
    outputs = model(**inputs )
    print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_logits , atol=1E-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing model and image processor to the hub..." )
        model.push_to_hub(F"""nielsr/{model_name}""" )
        image_processor.push_to_hub(F"""nielsr/{model_name}""" )
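# Illustrative invocation (the script and checkpoint paths are hypothetical):
#   python convert_maskformer_checkpoint.py --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path ./model.pkl --pytorch_dump_folder_path ./maskformer-swin-tiny-ade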
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        """--model_name""",
        default="""maskformer-swin-tiny-ade""",
        type=str,
        help="""Name of the MaskFormer model you'd like to convert""",
    )
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
) | 370 | 0 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
    )
# Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation='''relu'''))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
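    # Worked shape check (illustrative): each 3x3 valid convolution trims 2 pixels
    # and each 2x2 max-pool halves the feature map:
    #   64x64 -> conv -> 62x62 -> pool -> 31x31 -> conv -> 29x29 -> pool -> 14x14
    # so Flatten() emits 14 * 14 * 32 = 6272 features into the Dense layers below.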
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        '''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
    )
    test_set = test_datagen.flow_from_directory(
        '''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
    )
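    # Illustrative note: rescale=1/255 maps pixel values into [0, 1] for both
    # generators, while the shear/zoom/flip augmentations apply only to the training
    # stream, so evaluation runs on unmodified test images.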
    # Model.fit consumes generators directly; fit_generator was removed from tf.keras
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        '''dataset/single_prediction/image.png''', target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    # The sigmoid head outputs a probability in (0, 1), so threshold it instead of
    # comparing the float against exact 0/1 values.
    if result[0][0] < 0.5:
        prediction = '''Normal'''
    else:
        prediction = '''Abnormality detected'''
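    # Illustrative note: with class_mode='binary' the label-to-index mapping follows
    # the alphabetical order of the class folders; inspect training_set.class_indices
    # (e.g. it may return {'abnormal': 0, 'normal': 1}) to confirm which side of the
    # 0.5 threshold each class falls on.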
| 40 |
import os
import pytest
from attr import dataclass
__UpperCAmelCase = '''us-east-1''' # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}
    @property
    def metric_definitions(self ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"loss.*=\D*(.*?)]?$"},
            ]
    @property
    def base_job_name(self ) -> str:
        return F"""{self.framework}-transformers-test"""
    @property
    def test_path(self ) -> str:
        return F"""./tests/sagemaker/scripts/{self.framework}"""
    @property
    def image_uri(self ) -> str:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class" )
def sm_env(request ):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework )
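# Illustrative sketch (hypothetical helper; the sample log line is made up) of how a
# SageMaker metric Regex above extracts a value from a training log line:
def _demo_metric_regex(log_line="eval_accuracy = 0.8423"):
    import re

    match = re.search(r"eval_accuracy.*=\D*(.*?)$", log_line)
    return float(match.group(1)) if match else None  # -> 0.8423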
| 40 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
UpperCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCAmelCase_ = {
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
UpperCAmelCase_ = {
"""google/electra-small-generator""": 512,
"""google/electra-base-generator""": 512,
"""google/electra-large-generator""": 512,
"""google/electra-small-discriminator""": 512,
"""google/electra-base-discriminator""": 512,
"""google/electra-large-discriminator""": 512,
}
UpperCAmelCase_ = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary(self, save_directory, filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
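# Illustrative sketch of the layouts produced by the two methods above, with toy ids
# (101 and 102 are the conventional [CLS]/[SEP] ids in BERT-style vocabularies):
def _demo_special_token_layout():
    cls_id, sep_id = 101, 102
    ids_a, ids_b = [7, 8], [9]
    input_ids = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]  # [101, 7, 8, 102, 9, 102]
    token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)  # [0, 0, 0, 0, 1, 1]
    return input_ids, token_type_ids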
| 541 |
import collections
import inspect
import unittest
from transformers import Swinv2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
    from transformers import Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, Swinv2Model
    from transformers.models.swinv2.modeling_swinv2 import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class Swinv2ModelTester:
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=32 , lowerCAmelCase_=2 , lowerCAmelCase_=3 , lowerCAmelCase_=16 , lowerCAmelCase_=[1, 2, 1] , lowerCAmelCase_=[2, 2, 4] , lowerCAmelCase_=2 , lowerCAmelCase_=2.0 , lowerCAmelCase_=True , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.1 , lowerCAmelCase_="gelu" , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=0.02 , lowerCAmelCase_=1E-5 , lowerCAmelCase_=True , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=10 , lowerCAmelCase_=8 , ) -> Tuple:
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = embed_dim
_snake_case = depths
_snake_case = num_heads
_snake_case = window_size
_snake_case = mlp_ratio
_snake_case = qkv_bias
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = drop_path_rate
_snake_case = hidden_act
_snake_case = use_absolute_embeddings
_snake_case = patch_norm
_snake_case = layer_norm_eps
_snake_case = initializer_range
_snake_case = is_training
_snake_case = scope
_snake_case = use_labels
_snake_case = type_sequence_label_size
_snake_case = encoder_stride
def lowerCAmelCase ( self ) -> Optional[int]:
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self ) -> Optional[Any]:
        return Swinv2Config(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
        _snake_case = Swinv2Model(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ )
_snake_case = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_snake_case = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
        _snake_case = Swinv2ForMaskedImageModeling(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_snake_case = 1
        _snake_case = Swinv2ForMaskedImageModeling(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
_snake_case = self.type_sequence_label_size
        _snake_case = Swinv2ForImageClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
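# Illustrative sketch (hypothetical helper) of the shape arithmetic verified in
# create_and_check_model above, using the tester defaults image_size=32, patch_size=2,
# embed_dim=16 and three stages (depths=[1, 2, 1]):
def _demo_swinv2_shapes(image_size=32, patch_size=2, embed_dim=16, num_stages=3):
    seq_len = (image_size // patch_size) ** 2  # 256 patch tokens after embedding
    seq_len //= 4 ** (num_stages - 1)  # each patch-merging stage halves H and W
    hidden_dim = embed_dim * 2 ** (num_stages - 1)  # channels double at each merge
    return seq_len, hidden_dim  # (16, 64)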
@require_torch
class Swinv2ModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (Swinv2Model, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {'''feature-extraction''': Swinv2Model, '''image-classification''': Swinv2ForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def lowerCAmelCase ( self ) -> Any:
        self.model_tester = Swinv2ModelTester(self )
        self.config_tester = ConfigTester(self , config_class=Swinv2Config , embed_dim=37 )
def lowerCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self ) -> Optional[int]:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def lowerCAmelCase ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def lowerCAmelCase ( self ) -> Optional[Any]:
pass
def lowerCAmelCase ( self ) -> Dict:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowerCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear ) )
def lowerCAmelCase ( self ) -> Optional[int]:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowerCAmelCase_ )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Dict:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = True
for model_class in self.all_model_classes:
_snake_case = True
_snake_case = False
_snake_case = True
_snake_case = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_snake_case = outputs.attentions
_snake_case = len(self.model_tester.depths )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = config.window_size**2
_snake_case = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_snake_case = outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
_snake_case = len(lowerCAmelCase_ )
# Check attention is always last and order is fine
_snake_case = True
_snake_case = True
_snake_case = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
if hasattr(self.model_tester , 'num_hidden_states_types' ):
_snake_case = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
_snake_case = 2
self.assertEqual(out_len + added_hidden_states , len(lowerCAmelCase_ ) )
_snake_case = outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
_snake_case = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_snake_case = outputs.hidden_states
_snake_case = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
# Swinv2 has a different seq_length
_snake_case = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_snake_case = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_snake_case = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
_snake_case , _snake_case , _snake_case , _snake_case = reshaped_hidden_states[0].shape
_snake_case = (
reshaped_hidden_states[0].view(lowerCAmelCase_ , lowerCAmelCase_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_snake_case = True
self.check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
self.check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = 3
_snake_case = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_snake_case = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_snake_case = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_snake_case = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_snake_case = True
self.check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
self.check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , (padded_height, padded_width) )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase_ )
def lowerCAmelCase ( self ) -> Dict:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self ) -> int:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = Swinv2Model.from_pretrained(model_name )
            self.assertIsNotNone(model )
def lowerCAmelCase ( self ) -> str:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = _config_zero_init(lowerCAmelCase_ )
for model_class in self.all_model_classes:
_snake_case = model_class(config=lowerCAmelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
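# Illustrative sketch of the attention-shape assertions above: Swin v2 attends
# within local windows, so each attention map has shape
# (num_heads, window_size**2, window_size**2); the tester defaults give 2 heads
# over 2x2 windows (the helper name is hypothetical):
def _demo_window_attention_shape(num_heads=2, window_size=2):
    return (num_heads, window_size**2, window_size**2)  # (2, 4, 4)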
@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase ):
@cached_property
    def default_image_processor(self ):
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self ):
        model = Swinv2ForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
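# Illustrative end-to-end sketch mirroring the integration test above (hypothetical
# helper; assumes network access to the Hugging Face Hub):
def _demo_swinv2_predict(image):
    processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
    model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return model.config.id2label[logits.argmax(-1).item()]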
| 541 | 1 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1_000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
IMAGENET_64_UNET_CONFIG = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1_000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
LSUN_256_UNET_CONFIG = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
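# Illustrative note (a sketch, not executed by this script): each dict above
# parameterizes CMStochasticIterativeScheduler, which discretizes the noise level
# between sigma_min and sigma_max over num_train_timesteps steps, e.g.
#   scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)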
def str2bool(v ):
    """Parse a truthy/falsy command-line string into a bool."""
    if isinstance(v , bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('boolean value expected' )
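# Illustrative sketch of wiring str2bool into argparse (the --class_cond flag name
# is hypothetical; the script's real flags are defined further down):
def _demo_str2bool_flag():
    parser = argparse.ArgumentParser()
    parser.add_argument("--class_cond", type=str2bool, default=True)
    return parser.parse_args(["--class_cond", "false"]).class_cond  # False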
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False ):
    """Map one openai-style ResBlock onto a diffusers ResnetBlock2D."""
    new_checkpoint[F"{new_prefix}.norm1.weight"] = checkpoint[F"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[F"{new_prefix}.norm1.bias"] = checkpoint[F"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[F"{new_prefix}.conv1.weight"] = checkpoint[F"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[F"{new_prefix}.conv1.bias"] = checkpoint[F"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[F"{new_prefix}.time_emb_proj.weight"] = checkpoint[F"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[F"{new_prefix}.time_emb_proj.bias"] = checkpoint[F"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[F"{new_prefix}.norm2.weight"] = checkpoint[F"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[F"{new_prefix}.norm2.bias"] = checkpoint[F"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[F"{new_prefix}.conv2.weight"] = checkpoint[F"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[F"{new_prefix}.conv2.bias"] = checkpoint[F"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[F"{new_prefix}.conv_shortcut.weight"] = checkpoint[F"{old_prefix}.skip_connection.weight"]
        new_checkpoint[F"{new_prefix}.conv_shortcut.bias"] = checkpoint[F"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
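# Illustrative sketch (hypothetical helper) exercising convert_resnet on a toy
# checkpoint, so the key mapping above can be inspected without a real file:
def _demo_convert_resnet():
    names = [
        "in_layers.0.weight", "in_layers.0.bias", "in_layers.2.weight", "in_layers.2.bias",
        "emb_layers.1.weight", "emb_layers.1.bias", "out_layers.0.weight", "out_layers.0.bias",
        "out_layers.3.weight", "out_layers.3.bias",
    ]
    toy = {f"blk.{name}": torch.zeros(1) for name in names}
    return sorted(convert_resnet(toy, {}, "blk", "down_blocks.0.resnets.0"))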
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim=None ):
    """Map one openai-style AttentionBlock onto a diffusers Attention module."""
    weight_q, weight_k, weight_v = checkpoint[F"{old_prefix}.qkv.weight"].chunk(3 , dim=0 )
    bias_q, bias_k, bias_v = checkpoint[F"{old_prefix}.qkv.bias"].chunk(3 , dim=0 )
    new_checkpoint[F"{new_prefix}.group_norm.weight"] = checkpoint[F"{old_prefix}.norm.weight"]
    new_checkpoint[F"{new_prefix}.group_norm.bias"] = checkpoint[F"{old_prefix}.norm.bias"]
    new_checkpoint[F"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F"{new_prefix}.to_out.0.weight"] = (
        checkpoint[F"{old_prefix}.proj_out.weight"].squeeze(-1 ).squeeze(-1 )
    )
    new_checkpoint[F"{new_prefix}.to_out.0.bias"] = checkpoint[F"{old_prefix}.proj_out.bias"].squeeze(-1 ).squeeze(-1 )
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    """Convert an OpenAI consistency-model checkpoint into a diffusers UNet2DModel state dict."""
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint['time_embed.0.weight']
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint['time_embed.0.bias']
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint['time_embed.2.weight']
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint['time_embed.2.bias']
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint['label_emb.weight']
    new_checkpoint["conv_in.weight"] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint["conv_in.bias"] = checkpoint['input_blocks.0.0.bias']
    down_block_types = unet_config['down_block_types']
    layers_per_block = unet_config['layers_per_block']
    attention_head_dim = unet_config['attention_head_dim']
    channels_list = unet_config['block_out_channels']
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = F"down_blocks.{i}.resnets.{j}"
                old_prefix = F"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = F"down_blocks.{i}.resnets.{j}"
                old_prefix = F"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = F"down_blocks.{i}.attentions.{j}"
                old_prefix = F"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
        if i != len(down_block_types) - 1:
            new_prefix = F"down_blocks.{i}.downsamplers.0"
            old_prefix = F"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = 'mid_block.resnets.0'
    old_prefix = 'middle_block.0'
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = 'mid_block.attentions.0'
    old_prefix = 'middle_block.1'
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = 'mid_block.resnets.1'
    old_prefix = 'middle_block.2'
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    current_layer = 0
    up_block_types = unet_config['up_block_types']
    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = F"up_blocks.{i}.resnets.{j}"
                old_prefix = F"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = F"up_blocks.{i}.upsamplers.0"
                old_prefix = F"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = F"up_blocks.{i}.resnets.{j}"
                old_prefix = F"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = F"up_blocks.{i}.attentions.{j}"
                old_prefix = F"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = F"up_blocks.{i}.upsamplers.0"
                old_prefix = F"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_checkpoint["conv_norm_out.weight"] = checkpoint['out.0.weight']
    new_checkpoint["conv_norm_out.bias"] = checkpoint['out.0.bias']
    new_checkpoint["conv_out.weight"] = checkpoint['out.2.weight']
    new_checkpoint["conv_out.bias"] = checkpoint['out.2.bias']
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
    if not args.class_cond:
        unet_config["num_class_embeds"] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
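# Usage sketch (illustrative, not part of the script): the checkpoint filename and
# output directory below are hypothetical placeholders. After running the converter,
# the saved pipeline can be reloaded for single-step sampling:
#
#   python convert_consistency_to_diffusers.py \
#       --unet_path cd_imagenet64_l2.pt --dump_path ./converted --class_cond True
#
#   import torch
#   from diffusers import ConsistencyModelPipeline
#
#   pipe = ConsistencyModelPipeline.from_pretrained("./converted")
#   image = pipe(num_inference_steps=1, generator=torch.manual_seed(0)).images[0]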
| 27 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """Output of the semantic Stable Diffusion pipeline: generated images plus NSFW flags."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 478 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
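# Minimal usage sketch (illustrative): the `down_ops` schedule above is derived from
# `key_dim` and `hidden_sizes`, so changing either changes the subsample operations.
if __name__ == "__main__":
    config = LevitConfig(hidden_sizes=[128, 256, 384], key_dim=[16, 16, 16])
    print(config.model_type)    # "levit"
    print(config.down_ops[0])   # ["Subsample", 16, 8, 4, 2, 2] since 128 // 16 == 8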
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 697 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
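# Usage sketch for the filesystem defined below (illustrative; `dataset_info` would be a
# `DatasetInfo` object fetched from the Hub, which is assumed here rather than constructed):
#
#   fs = HfFileSystem(repo_info=dataset_info, token=None)
#   print(fs.ls(""))                   # top-level files and directories
#   print(fs.info("data/train.csv"))   # {"name": ..., "size": None, "type": "file"}
#   with fs.open("data/train.csv", mode="rb") as f:
#       head = f.read(1024)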
class HfFileSystem(AbstractFileSystem):
    """Legacy fsspec filesystem that browses a Hub repo through its `repo_info` metadata."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(self, repo_info=None, token=None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path, mode="rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
| 697 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
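# Example (illustrative): solving for the attractive force between two 4 m^2 plates
# held 3 cm apart; the magnitude is tiny, on the order of 6.4e-21 N.
if __name__ == "__main__":
    result = casimir_force(force=0, area=4, distance=0.03)
    print(result)  # {'force': ~6.42e-21}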
| 649 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
'init': 'src/transformers/__init__.py',
'setup': 'setup.py',
}
README_FILE = 'README.md'
def update_version_in_file(fname, version, pattern):
    """Update the version in one file, using the regex registered for `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the version pinned by `check_min_version` in every example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version everywhere it is hardcoded in the repo."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Point the model list in the README at the stable docs instead of `main`."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """Read the current version from the package __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
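# Quick illustration (not part of the release tooling): how the "init" pattern above
# rewrites a dev version string in place. Doctest-style, values are hypothetical.
#
#   >>> re_pattern, replace = REPLACE_PATTERNS["init"]
#   >>> re_pattern.sub(replace.replace("VERSION", "4.29.0"), '__version__ = "4.29.0.dev0"\n')
#   '__version__ = "4.29.0"\n'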
| 649 | 1 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
        T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 198 |
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the 1000-digit number."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
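# A compact equivalent (illustrative alternative): the same scan expressed with
# math.prod over every 13-digit window.
from math import prod


def solution_compact(n: str = N) -> int:
    return max(prod(int(digit) for digit in n[i : i + 13]) for i in range(len(n) - 12))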
if __name__ == "__main__":
print(F'''{solution() = }''')
| 198 | 1 |
'''simple docstring'''
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F'''{len(upper_files)} files contain uppercase characters:''')
print("\n".join(upper_files) + "\n")
space_files = [file for file in filepaths if " " in file]
if space_files:
print(F'''{len(space_files)} files contain space characters:''')
print("\n".join(space_files) + "\n")
hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
print(F'''{len(hyphen_files)} files contain hyphen characters:''')
print("\n".join(hyphen_files) + "\n")
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F'''{len(nodir_files)} files are not in a directory:''')
print("\n".join(nodir_files) + "\n")
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 390 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = '''vit_msn'''

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 390 | 1 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = 'conditional_detr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""")
                backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("""model_type""")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self) -> dict:
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""pixel_mask""", {0: """batch"""}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
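# Minimal usage sketch (illustrative): building a config and round-tripping it through
# `to_dict`, which also serializes a nested backbone config when one is present.
if __name__ == "__main__":
    config = ConditionalDetrConfig(num_queries=100, d_model=256)
    assert config.hidden_size == 256  # proxied to `d_model` via the properties above
    serialized = config.to_dict()
    print(serialized["model_type"], serialized["num_queries"])  # conditional_detr 100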
| 687 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    """Helper function for reproducible behavior: seeds `random`, `numpy` and `torch`."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    """Exponential Moving Average of model parameters."""

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                """Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """
                """Please pass the parameters of the module instead."""
            )
            deprecate(
                """passing a `torch.nn.Module` to `ExponentialMovingAverage`""",
                """1.0.0""",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True
        if kwargs.get("""max_value""", None) is not None:
            deprecation_message = """The `max_value` argument is deprecated. Please use `decay` instead."""
            deprecate("""max_value""", """1.0.0""", deprecation_message, standard_warn=False)
            decay = kwargs["""max_value"""]
        if kwargs.get("""min_value""", None) is not None:
            deprecation_message = """The `min_value` argument is deprecated. Please use `min_decay` instead."""
            deprecate("""min_value""", """1.0.0""", deprecation_message, standard_warn=False)
            min_decay = kwargs["""min_value"""]
        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]
        if kwargs.get("""device""", None) is not None:
            deprecation_message = """The `device` argument is deprecated. Please use `to` instead."""
            deprecate("""device""", """1.0.0""", deprecation_message, standard_warn=False)
            self.to(device=kwargs["""device"""])
        self.temp_stored_params = None
        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`
        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)
        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)
        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""")
        if self.model_config is None:
            raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""")
        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("""shadow_params""", None)
        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average."""
        step = max(0, optimization_step - self.update_after_step - 1)
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)
        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                """Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """
                """Please pass the parameters of the module instead."""
            )
            deprecate(
                """passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""",
                """1.0.0""",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()
        parameters = list(parameters)
        self.optimization_step += 1
        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay
        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed
        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)
            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Copy the current averaged parameters into the given collection of parameters."""
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        """Move the internal buffers of the ExponentialMovingAverage to `device`."""
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        """Returns the state of the ExponentialMovingAverage as a dict."""
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Save the current parameters for restoring later."""
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Restore the parameters stored with the `store` method."""
        if self.temp_stored_params is None:
            raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)
        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        """Loads the ExponentialMovingAverage state, validating every field."""
        state_dict = copy.deepcopy(state_dict)
        self.decay = state_dict.get("""decay""", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("""Decay must be between 0 and 1""")
        self.min_decay = state_dict.get("""min_decay""", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("""Invalid min_decay""")
        self.optimization_step = state_dict.get("""optimization_step""", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("""Invalid optimization_step""")
        self.update_after_step = state_dict.get("""update_after_step""", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("""Invalid update_after_step""")
        self.use_ema_warmup = state_dict.get("""use_ema_warmup""", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("""Invalid use_ema_warmup""")
        self.inv_gamma = state_dict.get("""inv_gamma""", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("""Invalid inv_gamma""")
        self.power = state_dict.get("""power""", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("""Invalid power""")
        shadow_params = state_dict.get("""shadow_params""", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("""shadow_params must be a list""")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("""shadow_params must all be Tensors""")
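# Usage sketch (illustrative): keep an EMA copy of a small model's weights and swap it
# in for evaluation. The toy model and training loop below are placeholders.
if __name__ == "__main__":
    model = torch.nn.Linear(4, 4)
    ema = EMAModel(model.parameters(), decay=0.999)
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
    for _ in range(10):
        loss = model(torch.randn(8, 4)).pow(2).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        ema.step(model.parameters())   # update the shadow weights
    ema.store(model.parameters())      # stash the live weights
    ema.copy_to(model.parameters())    # evaluate with the EMA weights
    ema.restore(model.parameters())    # put the live weights back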
| 687 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    """Unconditional image generation with the variance-exploding SDE (score-based) sampler."""

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)
        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)
            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample
            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample)
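# Usage sketch (illustrative; assumes a score-SDE checkpoint such as the NCSN++ church
# model is available on the Hub):
#
#   from diffusers import ScoreSdeVePipeline
#
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#   image = pipe(num_inference_steps=2000).images[0]
#   image.save("sde_ve_sample.png")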
| 20 |
"""simple docstring"""
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)
STOPPING_CRITERIA_INPUTS_DOCSTRING = r'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")
class MaxLengthCriteria(StoppingCriteria):
    """Stops generation once the full sequence length exceeds `max_length`."""

    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """
                "exceptions, performance degradation, or nothing at all.")
        return is_done
class MaxNewTokensCriteria(StoppingCriteria):
    """Deprecated: stops once `start_length + max_new_tokens` tokens have been generated."""

    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length
class MaxTimeCriteria(StoppingCriteria):
    """Stops generation once more than `max_time` seconds have elapsed."""

    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time
class StoppingCriteriaList(list):
    """A list of criteria; generation stops as soon as any single criterion fires."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None
def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    '''Reconcile the `max_length` carried by the criteria list with the explicit parameter.'''
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
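# Usage sketch (illustrative): combining criteria and letting the list report the
# effective `max_length`. The fake input ids stand in for a real generation batch.
if __name__ == "__main__":
    criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
    print(criteria.max_length)  # 20
    fake_ids = torch.ones((1, 25), dtype=torch.long)
    print(criteria(fake_ids, scores=None))  # True, since 25 >= 20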
| 682 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_swinv2""": ["""SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Swinv2Config"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
"""SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Swinv2ForImageClassification""",
"""Swinv2ForMaskedImageModeling""",
"""Swinv2Model""",
"""Swinv2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 718 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn='''gelu-approximate''',
            num_embeds_ada_norm=1000,
            norm_type='''ada_norm_zero''',
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''class_labels''': [1],
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    def test_inference(self):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available(),
        reason='''XFormers attention is only available with CUDA and `xformers` installed''',
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)
        pipe = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''')
        pipe.to('''cuda''')
        words = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
        class_ids = pipe.get_label_ids(words)
        images = pipe(class_ids, generator=generator, num_inference_steps=40, output_type='''np''').images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''')
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''')
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to('''cuda''')
        words = ['''vase''', '''umbrella''']
        class_ids = pipe.get_label_ids(words)
        generator = torch.manual_seed(0)
        images = pipe(class_ids, generator=generator, num_inference_steps=25, output_type='''np''').images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
                F'''/dit/{word}_512.npy''')
            assert np.abs((expected_image - image).max()) < 1e-1
| 152 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = 'unispeech'

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                F' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'
                F' `len(config.conv_kernel) = {len(self.conv_kernel)}`.')
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
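# Minimal usage sketch (illustrative): the derived properties above tie the conv
# feature extractor to the transformer stack.
if __name__ == "__main__":
    config = UniSpeechConfig()
    print(config.num_feat_extract_layers)  # 7, one per conv layer
    print(config.inputs_to_logits_ratio)   # 320 == 5*2*2*2*2*2*2 audio samples per logit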
| 510 |
"""simple docstring"""
def snake_case ( _a: float , _a: float )-> float:
'''simple docstring'''
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"""{price_plus_tax(100, 0.25) = }""")
print(f"""{price_plus_tax(1_25.50, 0.05) = }""")
| 510 | 1 |
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle `data` in place by repeatedly swapping two random positions."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
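# A small, self-contained demonstration of the shuffle above. Seeding `random`
# makes the permutation reproducible; the exact order is an implementation
# detail of CPython's PRNG, so treat the printed result as illustrative only.
def _shuffle_demo() -> None:
    random.seed(0)
    print(fisher_yates_shuffle(list(range(5))))  # some permutation of [0, 1, 2, 3, 4]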
if __name__ == "__main__":
__a : List[str] = [0, 1, 2, 3, 4, 5, 6, 7]
__a : Any = ["""python""", """says""", """hello""", """!"""]
print("""Fisher-Yates Shuffle:""")
print("""List""", integers, strings)
print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) | 522 | import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__a : str = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
    """Basic consistency checks on the distillation arguments."""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    """Freeze the positional embeddings of the student model."""
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    """Freeze the token type embeddings of the student model."""
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    """Parse arguments, build the teacher/student pair and run the distillation."""
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument("--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)")
    parser.add_argument("--data_file", type=str, required=True, help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.")
    parser.add_argument("--student_type", type=str, choices=["distilbert", "roberta", "gpt2"], required=True, help="The student type (DistilBERT, RoBERTa).")
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument("--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint.")
    parser.add_argument("--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa).")
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument("--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0.")
    parser.add_argument("--alpha_mlm", default=0.0, type=float, help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.")
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument("--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0.")
    parser.add_argument("--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.")
    parser.add_argument("--mlm_mask_prop", default=0.15, type=float, help="Proportion of tokens for which we need to make a prediction.")
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument("--mlm_smoothing", default=0.7, type=float, help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).")
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument("--restrict_ce_to_mask", action="store_true", help="If true, compute the distillation loss only the [MLM] prediction distribution.")
    parser.add_argument("--freeze_pos_embs", action="store_true", help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.")
    parser.add_argument("--freeze_token_type_embds", action="store_true", help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.")
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument("--group_by_size", action="store_false", help="If true, group sequences that have similar length into the same batch. Default is true.")
    parser.add_argument("--gradient_accumulation_steps", type=int, default=50, help="Gradient accumulation for larger training batches.")
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument("--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument("--fp16_opt_level", type=str, default="O1", help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)
# ARGS #
    init_gpu_params(args)
    set_seed(args)
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F"Experiment will be dumped and logged in {args.dump_path}" )
# SAVE PARAMS #
logger.info(F"Param: {args}" )
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
git_log(args.dump_path )
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F"Loading data from {args.data_file}" )
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F"Loading student config from {args.student_config}" )
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)
if args.n_gpu > 0:
student.to(F"cuda:{args.local_rank}" )
logger.info('''Student loaded.''' )
# TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
if args.n_gpu > 0:
teacher.to(F"cuda:{args.local_rank}" )
logger.info(F"Teacher loaded from {args.teacher_name}." )
# FREEZING #
if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
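# A hedged example invocation of this script (paths, model names, and loss
# weights below are illustrative assumptions, not values taken from this file):
#
#   python train.py --force \
#       --dump_path serialization_dir/my_distillation \
#       --data_file data/binarized_text.pickle \
#       --token_counts data/token_counts.pickle \
#       --student_type distilbert --student_config student_config.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_mlm 2.0 --alpha_clm 0.0 --alpha_ce 5.0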
if __name__ == "__main__":
    main()
| 522 | 1 |
def move_tower(height, from_pole, to_pole, with_pole):
    """Recursively move `height` disks from `from_pole` to `to_pole` using `with_pole`."""
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(from_pole, to_pole):
    print("moving disk from", from_pole, "to", to_pole)


def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")
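# A tiny non-interactive demonstration of the solver above; the pole labels are
# arbitrary. For height 2 it prints the familiar three moves: A->B, A->C, B->C.
def _hanoi_demo() -> None:
    move_tower(2, "A", "C", "B")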
if __name__ == "__main__":
    main()
| 80 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """A fourth-order Improved Pseudo Linear Multistep (iPNDM) scheduler."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
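# A minimal sketch of how a scheduler like this is driven during sampling. The
# `model` below is a stand-in callable (an assumption for illustration), not
# part of this file; a real pipeline would use a trained denoising network.
#
#     scheduler = IPNDMScheduler()
#     scheduler.set_timesteps(50)
#     sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_output = model(sample, t)  # hypothetical denoiser call
#         sample = scheduler.step(model_output, t, sample).prev_sample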
| 80 | 1 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
    main()
| 655 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for the MaskFormer Swin backbone."""

    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
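# A short usage sketch of the config above; the attribute values below follow
# directly from the defaults (hidden_size = 96 * 2**3 with four stages):
#
#     config = MaskFormerSwinConfig()
#     assert config.hidden_size == 768
#     assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]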
| 655 | 1 |
"""GLPN model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
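# Hedged usage sketch: instantiating the config with its defaults gives a
# four-stage encoder whose last stage is 256-dimensional:
#
#     config = GLPNConfig()
#     assert config.num_encoder_blocks == 4 and config.hidden_sizes[-1] == 256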
| 168 |
"""Compute the Levenshtein edit distance between two words via memoized recursion."""
import functools
def edit_distance(word1: str, word2: str) -> int:
    """Return the minimum number of edits turning `word1` into `word2`."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
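# A couple of worked examples for the function above (standard edit-distance
# results, no external assumptions): "kitten" -> "sitting" needs 3 edits.
def _edit_distance_demo() -> None:
    assert edit_distance("kitten", "sitting") == 3
    assert edit_distance("", "abc") == 3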
if __name__ == "__main__":
import doctest
doctest.testmod()
| 168 | 1 |
"""A toy one-time-pad style cipher built on pseudo-random keys."""
import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt `text`: each char code i becomes (i + k) * k for a random key k."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert `encrypt`: recover chr((c - k**2) / k) for each pair (c, k)."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
lowerCAmelCase_ : Optional[int] = Onepad().encrypt("""Hello""")
print(c, k)
print(Onepad().decrypt(c, k))
| 705 | '''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
lowerCAmelCase_ : Tuple = """true"""
def __A ( UpperCAmelCase ,UpperCAmelCase=8_2 ,UpperCAmelCase=1_6 ) -> Union[str, Any]:
'''simple docstring'''
set_seed(4_2 )
_UpperCamelCase : List[Any] = RegressionModel()
_UpperCamelCase : Any = deepcopy(UpperCAmelCase )
_UpperCamelCase : Tuple = RegressionDataset(length=UpperCAmelCase )
_UpperCamelCase : Union[str, Any] = DataLoader(UpperCAmelCase ,batch_size=UpperCAmelCase )
model.to(accelerator.device )
_UpperCamelCase , _UpperCamelCase : Dict = accelerator.prepare(UpperCAmelCase ,UpperCAmelCase )
return model, ddp_model, dataloader
def __A ( UpperCAmelCase ,UpperCAmelCase=False ) -> List[str]:
'''simple docstring'''
_UpperCamelCase : str = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" )
_UpperCamelCase : Optional[Any] = load_dataset("glue" ,"mrpc" ,split="validation" )
def tokenize_function(UpperCAmelCase ):
_UpperCamelCase : Tuple = tokenizer(examples["sentence1"] ,examples["sentence2"] ,truncation=UpperCAmelCase ,max_length=UpperCAmelCase )
return outputs
with accelerator.main_process_first():
_UpperCamelCase : str = dataset.map(
UpperCAmelCase ,batched=UpperCAmelCase ,remove_columns=["idx", "sentence1", "sentence2"] ,)
_UpperCamelCase : Optional[int] = tokenized_datasets.rename_column("label" ,"labels" )
def collate_fn(UpperCAmelCase ):
if use_longest:
return tokenizer.pad(UpperCAmelCase ,padding="longest" ,return_tensors="pt" )
return tokenizer.pad(UpperCAmelCase ,padding="max_length" ,max_length=1_2_8 ,return_tensors="pt" )
return DataLoader(UpperCAmelCase ,shuffle=UpperCAmelCase ,collate_fn=UpperCAmelCase ,batch_size=1_6 )
def __A ( UpperCAmelCase ,UpperCAmelCase ) -> Dict:
'''simple docstring'''
_UpperCamelCase : str = Accelerator(dispatch_batches=UpperCAmelCase ,split_batches=UpperCAmelCase )
_UpperCamelCase : Union[str, Any] = get_dataloader(UpperCAmelCase ,not dispatch_batches )
_UpperCamelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
"hf-internal-testing/mrpc-bert-base-cased" ,return_dict=UpperCAmelCase )
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = accelerator.prepare(UpperCAmelCase ,UpperCAmelCase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ) -> int:
'''simple docstring'''
_UpperCamelCase : List[str] = []
for batch in dataloader:
_UpperCamelCase , _UpperCamelCase : int = batch.values()
with torch.no_grad():
_UpperCamelCase : Tuple = model(UpperCAmelCase )
_UpperCamelCase , _UpperCamelCase : List[Any] = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
_UpperCamelCase , _UpperCamelCase : Optional[Any] = [], []
for logit, targ in logits_and_targets:
logits.append(UpperCAmelCase )
targs.append(UpperCAmelCase )
_UpperCamelCase , _UpperCamelCase : int = torch.cat(UpperCAmelCase ), torch.cat(UpperCAmelCase )
return logits, targs
def __A ( UpperCAmelCase ,UpperCAmelCase=8_2 ,UpperCAmelCase=False ,UpperCAmelCase=False ,UpperCAmelCase=1_6 ) -> Any:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Union[str, Any] = get_basic_setup(UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase )
_UpperCamelCase , _UpperCamelCase : Tuple = generate_predictions(UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase )
assert (
len(UpperCAmelCase ) == num_samples
), f'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(UpperCAmelCase )}'''
def __A ( UpperCAmelCase = False ,UpperCAmelCase = False ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : int = evaluate.load("glue" ,"mrpc" )
_UpperCamelCase , _UpperCamelCase : Any = get_mrpc_setup(UpperCAmelCase ,UpperCAmelCase )
# First do baseline
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Tuple = setup["no"]
model.to(UpperCAmelCase )
model.eval()
for batch in dataloader:
batch.to(UpperCAmelCase )
with torch.inference_mode():
_UpperCamelCase : Optional[Any] = model(**UpperCAmelCase )
_UpperCamelCase : Any = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=UpperCAmelCase ,references=batch["labels"] )
_UpperCamelCase : List[str] = metric.compute()
# Then do distributed
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : List[Any] = setup["ddp"]
model.eval()
for batch in dataloader:
with torch.inference_mode():
_UpperCamelCase : int = model(**UpperCAmelCase )
_UpperCamelCase : Tuple = outputs.logits.argmax(dim=-1 )
_UpperCamelCase : List[str] = batch["labels"]
_UpperCamelCase , _UpperCamelCase : List[str] = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=UpperCAmelCase ,references=UpperCAmelCase )
_UpperCamelCase : int = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] ,distributed[key] ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def __A ( ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase : Dict = Accelerator(split_batches=UpperCAmelCase ,dispatch_batches=UpperCAmelCase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("**Testing gather_for_metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(UpperCAmelCase ,UpperCAmelCase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test torch metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
_UpperCamelCase : int = Accelerator(split_batches=UpperCAmelCase ,dispatch_batches=UpperCAmelCase )
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(UpperCAmelCase ,9_9 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test last batch is not dropped when perfectly divisible**" )
_UpperCamelCase : int = Accelerator()
test_torch_metrics(UpperCAmelCase ,5_1_2 )
accelerator.state._reset_state()
def __A ( UpperCAmelCase ) -> List[str]:
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 204 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 178 |
"""simple docstring"""
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
snake_case : List[str] = mock.Mock()
snake_case : Any = 500
snake_case : int = {}
snake_case : int = HTTPError
snake_case : Dict = {}
# Download this model to make sure it's in the cache.
snake_case : Dict = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=UpperCamelCase__ ) as mock_head:
snake_case : str = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
snake_case : Union[str, Any] = mock.Mock()
snake_case : int = 500
snake_case : Optional[int] = {}
snake_case : int = HTTPError
snake_case : int = {}
# Download this model to make sure it's in the cache.
snake_case : Any = GPTaTokenizerFast.from_pretrained("gpt2" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=UpperCamelCase__ ) as mock_head:
snake_case : Tuple = GPTaTokenizerFast.from_pretrained("gpt2" )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
try:
snake_case : Tuple = tempfile.mktemp()
with open(UpperCamelCase__ , "wb" ) as f:
http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" , UpperCamelCase__ )
snake_case : List[str] = AlbertTokenizer.from_pretrained(UpperCamelCase__ )
finally:
os.remove(UpperCamelCase__ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json" , "wb" ) as f:
http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" , UpperCamelCase__ )
snake_case : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case : int = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
__UpperCAmelCase : Dict = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def lowerCamelCase ( cls ) -> Optional[int]:
'''simple docstring'''
snake_case : List[str] = TOKEN
HfFolder.save_token(UpperCamelCase__ )
@classmethod
def lowerCamelCase ( cls ) -> Union[str, Any]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case : Any = os.path.join(UpperCamelCase__ , "vocab.txt" )
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case : List[str] = BertTokenizer(UpperCamelCase__ )
tokenizer.push_to_hub("test-tokenizer" , use_auth_token=self._token )
snake_case : Dict = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="test-tokenizer" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase__ , repo_id="test-tokenizer" , push_to_hub=UpperCamelCase__ , use_auth_token=self._token )
snake_case : Dict = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case : Dict = os.path.join(UpperCamelCase__ , "vocab.txt" )
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case : Optional[int] = BertTokenizer(UpperCamelCase__ )
tokenizer.push_to_hub("valid_org/test-tokenizer-org" , use_auth_token=self._token )
snake_case : Dict = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-tokenizer-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
UpperCamelCase__ , repo_id="valid_org/test-tokenizer-org" , push_to_hub=UpperCamelCase__ , use_auth_token=self._token )
snake_case : Optional[int] = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case : Tuple = os.path.join(UpperCamelCase__ , "vocab.txt" )
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case : Any = CustomTokenizer(UpperCamelCase__ )
# No fast custom tokenizer
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' , trust_remote_code=UpperCamelCase__ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case : int = os.path.join(UpperCamelCase__ , "vocab.txt" )
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case : Optional[Any] = BertTokenizerFast.from_pretrained(UpperCamelCase__ )
bert_tokenizer.save_pretrained(UpperCamelCase__ )
snake_case : List[Any] = CustomTokenizerFast.from_pretrained(UpperCamelCase__ )
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' , trust_remote_code=UpperCamelCase__ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizerFast" )
snake_case : Optional[Any] = AutoTokenizer.from_pretrained(
F'{USER}/test-dynamic-tokenizer' , use_fast=UpperCamelCase__ , trust_remote_code=UpperCamelCase__ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
class _lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
snake_case : Any = Trie()
trie.add("Hello 友達" )
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
trie.add("Hello" )
trie.data
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
snake_case : Tuple = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS]", " This is a ", "extra_id_100"] )
def lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
snake_case : str = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ) , ["A", "BC"] )
self.assertEqual(trie.split("BCA" ) , ["BC", "A"] )
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
snake_case : Union[str, Any] = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
snake_case : Optional[int] = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
snake_case : List[Any] = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ) , ["AB", "C"] )
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
snake_case : str = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ) , ["ABC", "D"] )
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case : Optional[int] = Trie()
snake_case : Union[str, Any] = trie.cut_text("ABC" , [0, 0, 2, 1, 2, 3] )
self.assertEqual(UpperCamelCase__ , ["AB", "C"] )
| 178 | 1 |
"""Tests for the LayoutLM tokenizer."""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens(self):
        # intentionally left empty in the original file
        pass
| 713 |
"""Jaccard similarity: |intersection| / |union| of two collections."""


def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Return the Jaccard similarity, or |A ∩ B| / (|A| + |B|) if `alternative_union`."""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
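# Worked example for the function above: the two sets below share {c, d, e}
# (3 elements) out of 8 distinct elements, so the similarity is 3 / 8 = 0.375.
def _jaccard_demo() -> None:
    assert jaccard_similarity({"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"}) == 0.375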
if __name__ == "__main__":
_UpperCamelCase : Any = {'a', 'b', 'c', 'd', 'e'}
_UpperCamelCase : Optional[int] = {'c', 'd', 'e', 'f', 'h', 'i'}
print(jaccard_similarity(set_a, set_b))
| 216 | 0 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
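# Hedged usage sketch for the processor above (the model id and `image` source
# are illustrative assumptions, not values taken from this file):
#
#     from transformers import AutoProcessor
#     processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
#     inputs = processor(images=image, text="a photo of", return_tensors="pt")
#     # -> dict with `pixel_values`, `input_ids`, `attention_mask`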
| 629 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 629 | 1 |
"""GPTSAN-japanese model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}


class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, **kwargs
        )
| 420 |
"""Check whether a two-dimensional system of forces is in static equilibrium."""
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force of `magnitude` acting at `angle` into its x/y components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    """True when the net moment of `forces` applied at `location` is (approximately) zero."""
    # summation of moments is zero
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
# Test to check if it works
lowercase__ = array(
[
polar_force(718.4, 1_80 - 30),
polar_force(879.54, 45),
polar_force(1_00, -90),
]
)
lowercase__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
lowercase__ = array(
[
polar_force(30 * 9.81, 15),
polar_force(2_15, 1_80 - 45),
polar_force(2_64, 90 - 30),
]
)
lowercase__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
lowercase__ = array([[0, -20_00], [0, -12_00], [0, 1_56_00], [0, -1_24_00]])
lowercase__ = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 420 | 1 |
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
A = re.compile(R'([A-Z]+)([A-Z][a-z])')
A = re.compile(R'([a-z\d])([A-Z])')
A = re.compile(R'(?<!_)_(?!_)')
A = re.compile(R'(_{2,})')
A = R'^\w+(\.\w+)*$'
A = R'<>:/\|?*'
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: List[Any] ):
"""simple docstring"""
snake_case : str = _uppercase_uppercase_re.sub(r"\1_\2" , lowerCamelCase_ )
snake_case : Union[str, Any] = _lowercase_uppercase_re.sub(r"\1_\2" , lowerCamelCase_ )
return name.lower()
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: Any ):
"""simple docstring"""
snake_case : List[Any] = _single_underscore_re.split(lowerCamelCase_ )
snake_case : int = [_multiple_underscores_re.split(lowerCamelCase_ ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(lowerCamelCase_ ) if n != "" )
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: List[str] ):
"""simple docstring"""
if os.path.basename(lowerCamelCase_ ) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''' )
return camelcase_to_snakecase(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: Any , lowerCamelCase_: Optional[int] ):
"""simple docstring"""
if os.path.basename(lowerCamelCase_ ) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''' )
if not re.match(_split_re , lowerCamelCase_ ):
raise ValueError(f'''Split name should match \'{_split_re}\' but got \'{split}\'.''' )
return f'''{filename_prefix_for_name(lowerCamelCase_ )}-{split}'''
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: Any , lowerCamelCase_: str , lowerCamelCase_: List[str] , lowerCamelCase_: Tuple=None ):
"""simple docstring"""
snake_case : Tuple = filename_prefix_for_split(lowerCamelCase_ , lowerCamelCase_ )
if filetype_suffix:
prefix += f'''.{filetype_suffix}'''
snake_case : Union[str, Any] = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
return f'''{filepath}*'''
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: Tuple , lowerCamelCase_: Any , lowerCamelCase_: List[Any] , lowerCamelCase_: Optional[int]=None , lowerCamelCase_: List[Any]=None ):
"""simple docstring"""
snake_case : int = filename_prefix_for_split(lowerCamelCase_ , lowerCamelCase_ )
snake_case : Dict = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
if shard_lengths:
snake_case : Dict = len(lowerCamelCase_ )
snake_case : int = [f'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(lowerCamelCase_ )]
if filetype_suffix:
snake_case : List[str] = [filename + f'''.{filetype_suffix}''' for filename in filenames]
return filenames
else:
snake_case : Any = prefix
if filetype_suffix:
filename += f'''.{filetype_suffix}'''
return [filename]
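# Added standalone sketch of the two-pass CamelCase -> snake_case technique
# used above (the helper names here are illustrative, not from the original):
import re as _re
_upper_upper = _re.compile(r"([A-Z]+)([A-Z][a-z])")
_lower_upper = _re.compile(r"([a-z\d])([A-Z])")

def _camel_to_snake(name: str) -> str:
    name = _upper_upper.sub(r"\1_\2", name)  # "HTMLParser" -> "HTML_Parser"
    name = _lower_upper.sub(r"\1_\2", name)  # "SquadV2"    -> "Squad_V2"
    return name.lower()

assert _camel_to_snake("SquadV2") == "squad_v2"
assert _camel_to_snake("HTMLParser") == "html_parser"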
| 449 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
A = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
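# Added sketch of the deferred-import idea behind _LazyModule, using the
# module-level __getattr__ hook from PEP 562. Standalone illustration only;
# the attribute/module pair below is an arbitrary example.
import importlib

_LAZY_ATTRS = {"sqrt": "math"}  # attribute name -> module that provides it

def __getattr__(name):
    if name in _LAZY_ATTRS:
        return getattr(importlib.import_module(_LAZY_ATTRS[name]), name)
    raise AttributeError(name)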
| 449 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowerCAmelCase = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 396 |
"""simple docstring"""
# Algorithm for the pigeonhole sorting
def A_ ( __UpperCamelCase : str ):
lowercase = min(__UpperCamelCase ) # min() finds the minimum value
lowercase = max(__UpperCamelCase ) # max() finds the maximum value
lowercase = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
lowercase = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(__UpperCamelCase , __UpperCamelCase ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
lowercase = 0
for count in range(__UpperCamelCase ):
while holes[count] > 0:
holes[count] -= 1
a[i] = count + min_val
i += 1
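# Added sanity check: pigeonhole sort is O(n + range) in time and allocates
# one hole per value in [min, max], so it suits dense integer inputs.
# Assumes the function keeps its original name, as in main() below.
_sample = [3, 1, 2, 2]
pigeonhole_sort(_sample)
assert _sample == [1, 2, 2, 3]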
def A_ ( ):
lowercase = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(__UpperCamelCase )
print('''Sorted order is:''' , ''' '''.join([str(x) for x in __UpperCamelCase] ) )
if __name__ == "__main__":
main()
| 396 | 1 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 42 # [batch_size x 3]
__UpperCAmelCase = 42 # [batch_size x 3]
__UpperCAmelCase = 42 # [batch_size x 3]
__UpperCAmelCase = 42 # [batch_size x 3]
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = 42
def lowercase_ ( self ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def lowercase_ ( self ):
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def lowercase_ ( self ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def lowercase_ ( self ):
__snake_case : List[Any] = torch.arange(self.height * self.width )
__snake_case : str = torch.stack(
[
pixel_indices % self.width,
torch.div(_UpperCAmelCase , self.width , rounding_mode='trunc' ),
] , axis=1 , )
return coords
@property
def lowercase_ ( self ):
__snake_case , *__snake_case : Dict = self.shape
__snake_case : Tuple = int(np.prod(_UpperCAmelCase ) )
__snake_case : List[Any] = self.get_image_coords()
__snake_case : Any = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
__snake_case : Optional[int] = self.get_camera_rays(_UpperCAmelCase )
__snake_case : Dict = rays.view(_UpperCAmelCase , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case , *__snake_case , __snake_case : str = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
__snake_case : Any = coords.view(_UpperCAmelCase , -1 , 2 )
__snake_case : Optional[Any] = self.resolution()
__snake_case : int = self.fov()
__snake_case : Dict = (flat.float() / (res - 1)) * 2 - 1
__snake_case : str = fracs * torch.tan(fov / 2 )
__snake_case : Any = fracs.view(_UpperCAmelCase , -1 , 2 )
__snake_case : Dict = (
self.z.view(_UpperCAmelCase , 1 , 3 )
+ self.x.view(_UpperCAmelCase , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(_UpperCAmelCase , 1 , 3 ) * fracs[:, :, 1:]
)
__snake_case : Union[str, Any] = directions / directions.norm(dim=-1 , keepdim=_UpperCAmelCase )
__snake_case : List[str] = torch.stack(
[
torch.broadcast_to(self.origin.view(_UpperCAmelCase , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(_UpperCAmelCase , *_UpperCAmelCase , 2 , 3 )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=_UpperCAmelCase , height=_UpperCAmelCase , x_fov=self.x_fov , y_fov=self.y_fov , )
def UpperCAmelCase__( __UpperCAmelCase : int ):
__snake_case : Tuple = []
__snake_case : int = []
__snake_case : Optional[Any] = []
__snake_case : List[Any] = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
__snake_case : Optional[int] = np.array([np.sin(__UpperCAmelCase ), np.cos(__UpperCAmelCase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
__snake_case : List[Any] = -z * 4
__snake_case : List[str] = np.array([np.cos(__UpperCAmelCase ), -np.sin(__UpperCAmelCase ), 0.0] )
__snake_case : Optional[Any] = np.cross(__UpperCAmelCase , __UpperCAmelCase )
origins.append(__UpperCAmelCase )
xs.append(__UpperCAmelCase )
ys.append(__UpperCAmelCase )
zs.append(__UpperCAmelCase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(__UpperCAmelCase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(__UpperCAmelCase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(__UpperCAmelCase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(__UpperCAmelCase , axis=0 ) ).float() , width=__UpperCAmelCase , height=__UpperCAmelCase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(__UpperCAmelCase )) , )
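# Added sketch of the pixel -> NDC mapping used in get_camera_rays above:
# frac = (pixel / (res - 1)) * 2 - 1 sends [0, res - 1] onto [-1, 1]; scaling
# by tan(fov / 2) then turns the fraction into a ray slope in camera space.
_res = 5
_px = torch.arange(_res, dtype=torch.float32)
_fracs = (_px / (_res - 1)) * 2 - 1
assert _fracs[0].item() == -1.0 and _fracs[-1].item() == 1.0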
| 576 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def UpperCAmelCase__( __UpperCAmelCase : int ):
__snake_case : Tuple = filter(lambda __UpperCAmelCase : p.requires_grad , model.parameters() )
__snake_case : List[str] = sum([np.prod(p.size() ) for p in model_parameters] )
return params
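# Added standalone check of the counting scheme above: the number of trainable
# parameters is just the sum of element counts over requires_grad tensors.
import torch.nn as _nn
_lin = _nn.Linear(3, 2)  # 3*2 weights + 2 biases = 8 trainable parameters
assert sum(int(np.prod(p.size())) for p in _lin.parameters() if p.requires_grad) == 8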
__magic_name__ = logging.getLogger(__name__)
def UpperCAmelCase__( __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any] ):
if metric == "rouge2":
__snake_case : Dict = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
__snake_case : Any = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
__snake_case : Dict = '{val_avg_em:.4f}-{step_count}'
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
' function.' )
__snake_case : List[Any] = ModelCheckpoint(
dirpath=__UpperCAmelCase , filename=__UpperCAmelCase , monitor=F"""val_{metric}""" , mode='max' , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict ):
return EarlyStopping(
monitor=F"""val_{metric}""" , mode='min' if 'loss' in metric else 'max' , patience=__UpperCAmelCase , verbose=__UpperCAmelCase , )
class __SCREAMING_SNAKE_CASE ( pl.Callback):
"""simple docstring"""
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[str] = {F"""lr_group_{i}""": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_UpperCAmelCase )
@rank_zero_only
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=True ):
logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
__snake_case : List[str] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
__snake_case : Dict = Path(pl_module.hparams.output_dir )
if type_path == "test":
__snake_case : Union[str, Any] = od / 'test_results.txt'
__snake_case : Union[str, Any] = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
__snake_case : Tuple = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
__snake_case : List[str] = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=_UpperCAmelCase )
generations_file.parent.mkdir(exist_ok=_UpperCAmelCase )
with open(_UpperCAmelCase , 'a+' ) as writer:
for key in sorted(_UpperCAmelCase ):
if key in ["log", "progress_bar", "preds"]:
continue
__snake_case : Tuple = metrics[key]
if isinstance(_UpperCAmelCase , torch.Tensor ):
__snake_case : List[Any] = val.item()
__snake_case : Dict = F"""{key}: {val:.6f}\n"""
writer.write(_UpperCAmelCase )
if not save_generations:
return
if "preds" in metrics:
__snake_case : Optional[int] = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(_UpperCAmelCase )
@rank_zero_only
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase ):
try:
__snake_case : Any = pl_module.model.model.num_parameters()
except AttributeError:
__snake_case : List[Any] = pl_module.model.num_parameters()
__snake_case : List[str] = count_trainable_parameters(_UpperCAmelCase )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )
@rank_zero_only
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_UpperCAmelCase , _UpperCAmelCase , 'test' )
@rank_zero_only
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 576 | 1 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
A_ = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
A_ = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''')
def _lowerCAmelCase ( UpperCAmelCase__ : Tuple, UpperCAmelCase__ : List[Any]=False ) ->str:
A__ , A__ : Union[str, Any] = create_model(
"""HTSAT-tiny""", """roberta""", UpperCAmelCase__, precision="""fp32""", device="""cuda:0""" if torch.cuda.is_available() else """cpu""", enable_fusion=UpperCAmelCase__, fusion_type="""aff_2d""" if enable_fusion else None, )
return model, model_cfg
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[Any] ) ->List[Any]:
A__ : Optional[Any] = {}
A__ : int = R""".*sequential.(\d+).*"""
A__ : int = R""".*_projection.(\d+).*"""
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
A__ : Optional[Any] = key.replace(UpperCAmelCase__, UpperCAmelCase__ )
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
# replace sequential layers with list
A__ : int = re.match(UpperCAmelCase__, UpperCAmelCase__ ).group(1 )
A__ : Dict = key.replace(f'sequential.{sequential_layer}.', f'layers.{int(sequential_layer )//3}.linear.' )
elif re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A__ : Dict = int(re.match(UpperCAmelCase__, UpperCAmelCase__ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
A__ : Dict = 1 if projecton_layer == 0 else 2
A__ : Union[str, Any] = key.replace(f'_projection.{projecton_layer}.', f'_projection.linear{transformers_projection_layer}.' )
if "audio" and "qkv" in key:
# split qkv into query key and value
A__ : Tuple = value
A__ : Optional[int] = mixed_qkv.size(0 ) // 3
A__ : Optional[int] = mixed_qkv[:qkv_dim]
A__ : List[Any] = mixed_qkv[qkv_dim : qkv_dim * 2]
A__ : Tuple = mixed_qkv[qkv_dim * 2 :]
A__ : Optional[Any] = query_layer
A__ : int = key_layer
A__ : Union[str, Any] = value_layer
else:
A__ : List[str] = value
return model_state_dict
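# Added sketch of the fused-qkv split above: a (3 * dim, hidden) projection is
# cut into equal query / key / value thirds along dim 0.
_fused = torch.arange(6 * 4, dtype=torch.float32).reshape(6, 4)  # dim = 2
_dim = _fused.size(0) // 3
_q, _k, _v = _fused[:_dim], _fused[_dim : _dim * 2], _fused[_dim * 2 :]
assert _q.shape == _k.shape == _v.shape == (2, 4)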
def _lowerCAmelCase ( UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Any, UpperCAmelCase__ : Any, UpperCAmelCase__ : List[Any]=False ) ->Union[str, Any]:
A__ , A__ : List[Any] = init_clap(UpperCAmelCase__, enable_fusion=UpperCAmelCase__ )
clap_model.eval()
A__ : Dict = clap_model.state_dict()
A__ : str = rename_state_dict(UpperCAmelCase__ )
A__ : Dict = ClapConfig()
A__ : str = enable_fusion
A__ : int = ClapModel(UpperCAmelCase__ )
# ignore the spectrogram embedding layer
model.load_state_dict(UpperCAmelCase__, strict=UpperCAmelCase__ )
model.save_pretrained(UpperCAmelCase__ )
transformers_config.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
A_ = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 498 |
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _lowerCAmelCase ( UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Union[str, Any]=None ) ->Tuple:
A__ : Dict = None
if token is not None:
A__ : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'Bearer {token}'}
A__ : Dict = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
A__ : Any = requests.get(UpperCAmelCase__, headers=UpperCAmelCase__ ).json()
A__ : Tuple = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
A__ : Optional[Any] = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(UpperCAmelCase__ ):
A__ : str = requests.get(url + f'&page={i + 2}', headers=UpperCAmelCase__ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
def _lowerCAmelCase ( UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : str=None ) ->List[str]:
A__ : Optional[Any] = None
if token is not None:
A__ : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'Bearer {token}'}
A__ : str = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'
A__ : Dict = requests.get(UpperCAmelCase__, headers=UpperCAmelCase__ ).json()
A__ : Any = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
A__ : Union[str, Any] = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(UpperCAmelCase__ ):
A__ : Union[str, Any] = requests.get(url + f'&page={i + 2}', headers=UpperCAmelCase__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
def _lowerCAmelCase ( UpperCAmelCase__ : Dict, UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Any ) ->Tuple:
A__ : Tuple = None
if token is not None:
A__ : List[Any] = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'Bearer {token}'}
A__ : Tuple = requests.get(UpperCAmelCase__, headers=UpperCAmelCase__, allow_redirects=UpperCAmelCase__ )
A__ : Dict = result.headers["""Location"""]
A__ : Union[str, Any] = requests.get(UpperCAmelCase__, allow_redirects=UpperCAmelCase__ )
A__ : int = os.path.join(UpperCAmelCase__, f'{artifact_name}.zip' )
with open(UpperCAmelCase__, """wb""" ) as fp:
fp.write(response.content )
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Tuple=None ) ->Tuple:
A__ : int = []
A__ : Union[str, Any] = []
A__ : Optional[Any] = None
with zipfile.ZipFile(UpperCAmelCase__ ) as z:
for filename in z.namelist():
if not os.path.isdir(UpperCAmelCase__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(UpperCAmelCase__ ) as f:
for line in f:
A__ : int = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
A__ : List[str] = line[: line.index(""": """ )]
A__ : str = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
A__ : Any = line[len("""FAILED """ ) :]
failed_tests.append(UpperCAmelCase__ )
elif filename == "job_name.txt":
A__ : Any = line
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError(
f'`errors` and `failed_tests` should have the same number of elements. Got {len(UpperCAmelCase__ )} for `errors` '
f'and {len(UpperCAmelCase__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'
""" problem.""" )
A__ : List[str] = None
if job_name and job_links:
A__ : Any = job_links.get(UpperCAmelCase__, UpperCAmelCase__ )
# A list with elements of the form (line of error, error, failed test)
A__ : str = [x + [y] + [job_link] for x, y in zip(UpperCAmelCase__, UpperCAmelCase__ )]
return result
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : int=None ) ->str:
A__ : List[Any] = []
A__ : Dict = [os.path.join(UpperCAmelCase__, UpperCAmelCase__ ) for p in os.listdir(UpperCAmelCase__ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(UpperCAmelCase__, job_links=UpperCAmelCase__ ) )
return errors
def _lowerCAmelCase ( UpperCAmelCase__ : Dict, UpperCAmelCase__ : Dict=None ) ->List[Any]:
A__ : Dict = Counter()
counter.update([x[1] for x in logs] )
A__ : str = counter.most_common()
A__ : Dict = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
A__ : Optional[int] = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
A__ : List[str] = dict(sorted(r.items(), key=lambda item : item[1]["count"], reverse=UpperCAmelCase__ ) )
return r
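# Added sketch: the grouping above is plain Counter bookkeeping -- count each
# error string, then keep (job_link, error_line) pairs per error (toy data).
_logs = [("line1", "ImportError", "link"), ("line2", "ImportError", "link"), ("line3", "OSError", "link")]
_top = Counter(x[1] for x in _logs).most_common(1)
assert _top == [("ImportError", 2)]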
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[Any] ) ->str:
A__ : List[str] = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
A__ : Union[str, Any] = test.split("""/""" )[2]
else:
A__ : int = None
return model
def _lowerCAmelCase ( UpperCAmelCase__ : str, UpperCAmelCase__ : str=None ) ->Optional[Any]:
A__ : Any = [(x[0], x[1], get_model(x[2] )) for x in logs]
A__ : List[Any] = [x for x in logs if x[2] is not None]
A__ : Union[str, Any] = {x[2] for x in logs}
A__ : int = {}
for test in tests:
A__ : Optional[int] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
A__ : Any = counter.most_common()
A__ : Any = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
A__ : List[str] = sum(error_counts.values() )
if n_errors > 0:
A__ : str = {"""count""": n_errors, """errors""": error_counts}
A__ : Dict = dict(sorted(r.items(), key=lambda item : item[1]["count"], reverse=UpperCAmelCase__ ) )
return r
def _lowerCAmelCase ( UpperCAmelCase__ : Dict ) ->List[Any]:
A__ : List[Any] = """| no. | error | status |"""
A__ : Union[str, Any] = """|-:|:-|:-|"""
A__ : Dict = [header, sep]
for error in reduced_by_error:
A__ : List[Any] = reduced_by_error[error]["""count"""]
A__ : List[Any] = f'| {count} | {error[:1_0_0]} | |'
lines.append(UpperCAmelCase__ )
return "\n".join(UpperCAmelCase__ )
def _lowerCAmelCase ( UpperCAmelCase__ : int ) ->int:
A__ : str = """| model | no. of errors | major error | count |"""
A__ : Optional[int] = """|-:|-:|-:|-:|"""
A__ : Tuple = [header, sep]
for model in reduced_by_model:
A__ : Optional[Any] = reduced_by_model[model]["""count"""]
A__ , A__ : Optional[Any] = list(reduced_by_model[model]["""errors"""].items() )[0]
A__ : Optional[int] = f'| {model} | {count} | {error[:6_0]} | {_count} |'
lines.append(UpperCAmelCase__ )
return "\n".join(UpperCAmelCase__ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
A_ = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
A_ = get_job_links(args.workflow_run_id, token=args.token)
A_ = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
A_ = k.find(''' / ''')
A_ = k[index + len(''' / ''') :]
A_ = v
with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
A_ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
A_ = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
A_ = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
A_ = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
A_ = reduce_by_error(errors)
A_ = reduce_by_model(errors)
A_ = make_github_table(reduced_by_error)
A_ = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
| 498 | 1 |
"""simple docstring"""
snake_case = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
snake_case = {value: key for key, value in encode_dict.items()}
def snake_case ( lowerCAmelCase_ ) -> str:
_snake_case = ''''''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('''encode() accepts only letters of the alphabet and spaces''' )
return encoded
def snake_case ( lowerCAmelCase_ ) -> str:
if set(lowerCAmelCase_ ) - {"A", "B", " "} != set():
raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' )
_snake_case = ''''''
for word in coded.split():
while len(lowerCAmelCase_ ) != 0:
decoded += decode_dict[word[:5]]
_snake_case = word[5:]
decoded += " "
return decoded.strip()
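# Added round-trip check (assuming the two helpers keep their original names
# encode / decode, as in the doctest hook below). Note the non-sequential
# codes for "j" and "v" above: encode_dict and decode_dict are still mutual
# inverses, so round-trips hold.
assert decode(encode("hello world")) == "hello world"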
if __name__ == "__main__":
from doctest import testmod
testmod()
| 103 |
def _A ( SCREAMING_SNAKE_CASE ):
UpperCAmelCase__ , UpperCAmelCase__: int = [], []
while len(SCREAMING_SNAKE_CASE ) > 1:
UpperCAmelCase__ , UpperCAmelCase__: str = min(SCREAMING_SNAKE_CASE ), max(SCREAMING_SNAKE_CASE )
start.append(min_one )
end.append(max_one )
collection.remove(min_one )
collection.remove(max_one )
end.reverse()
return start + collection + end
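# Added check: despite the name, this is a min/max selection scheme that peels
# both extremes off per pass (O(n^2) overall), not a true merge sort. Assumes
# the function keeps its original name, as in the demo below.
assert merge_sort([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]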
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] =input("""Enter numbers separated by a comma:\n""").strip()
_lowerCAmelCase : Dict =[int(item) for item in user_input.split(""",""")]
print(*merge_sort(unsorted), sep=""",""")
| 113 | 0 |
"""simple docstring"""
def _lowerCAmelCase(a : int = 1000 ) -> int:
_SCREAMING_SNAKE_CASE =2**power
_SCREAMING_SNAKE_CASE =0
while n:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =r + n % 10, n // 10
return r
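# Added worked example: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26. Assumes the
# function keeps its original name, as in the entry point below.
assert solution(15) == 26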
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 165 |
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
UpperCAmelCase_ : Union[str, Any] = '''
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
'''
UpperCAmelCase_ : Union[str, Any] = '''
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric("mean_iou")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
'''
UpperCAmelCase_ : Any = '''\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}'''
def _lowerCAmelCase(a : Optional[int] , a : Union[str, Any] , a : Optional[int] , a : bool , a : Optional[Dict[int, int]] = None , a : bool = False , ) -> Any:
if label_map is not None:
for old_id, new_id in label_map.items():
_SCREAMING_SNAKE_CASE =new_id
# turn into Numpy arrays
_SCREAMING_SNAKE_CASE =np.array(a )
_SCREAMING_SNAKE_CASE =np.array(a )
if reduce_labels:
_SCREAMING_SNAKE_CASE =255
_SCREAMING_SNAKE_CASE =label - 1
_SCREAMING_SNAKE_CASE =255
_SCREAMING_SNAKE_CASE =label != ignore_index
_SCREAMING_SNAKE_CASE =np.not_equal(a , a )
_SCREAMING_SNAKE_CASE =pred_label[mask]
_SCREAMING_SNAKE_CASE =np.array(a )[mask]
_SCREAMING_SNAKE_CASE =pred_label[pred_label == label]
_SCREAMING_SNAKE_CASE =np.histogram(a , bins=a , range=(0, num_labels - 1) )[0]
_SCREAMING_SNAKE_CASE =np.histogram(a , bins=a , range=(0, num_labels - 1) )[0]
_SCREAMING_SNAKE_CASE =np.histogram(a , bins=a , range=(0, num_labels - 1) )[0]
_SCREAMING_SNAKE_CASE =area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
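# Added standalone sketch of the histogram trick above: histogramming the
# pixels where pred == label gives per-class intersection counts, and
# union = pred_area + label_area - intersection.
_pred = np.array([0, 0, 1, 1])
_label = np.array([0, 1, 1, 1])
_inter = np.histogram(_pred[_pred == _label], bins=2, range=(0, 1))[0]
_union = (
    np.histogram(_pred, bins=2, range=(0, 1))[0]
    + np.histogram(_label, bins=2, range=(0, 1))[0]
    - _inter
)
assert list(_inter) == [1, 2] and list(_union) == [2, 3]  # IoU = [0.5, 2/3]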
def _lowerCAmelCase(a : int , a : Any , a : List[str] , a : bool , a : Optional[Dict[int, int]] = None , a : bool = False , ) -> List[Any]:
_SCREAMING_SNAKE_CASE =np.zeros((num_labels,) , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =np.zeros((num_labels,) , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =np.zeros((num_labels,) , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(a , a ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =intersect_and_union(
a , a , a , a , a , a )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def _lowerCAmelCase(a : int , a : str , a : Tuple , a : bool , a : Optional[int] = None , a : Optional[Dict[int, int]] = None , a : bool = False , ) -> List[str]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =total_intersect_and_union(
a , a , a , a , a , a )
# compute metrics
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE =total_area_intersect.sum() / total_area_label.sum()
_SCREAMING_SNAKE_CASE =total_area_intersect / total_area_union
_SCREAMING_SNAKE_CASE =total_area_intersect / total_area_label
_SCREAMING_SNAKE_CASE =np.nanmean(a )
_SCREAMING_SNAKE_CASE =np.nanmean(a )
_SCREAMING_SNAKE_CASE =all_acc
_SCREAMING_SNAKE_CASE =iou
_SCREAMING_SNAKE_CASE =acc
if nan_to_num is not None:
_SCREAMING_SNAKE_CASE ={metric: np.nan_to_num(a , nan=a ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
'''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
} ) , reference_urls=[
'''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
] , )
def UpperCamelCase_ ( self , _A , _A , _A , _A , _A = None , _A = None , _A = False , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =mean_iou(
results=_A , gt_seg_maps=_A , num_labels=_A , ignore_index=_A , nan_to_num=_A , label_map=_A , reduce_labels=_A , )
return iou_result
| 165 | 1 |
'''simple docstring'''
import os
import jsonlines
import numpy as np
from tqdm import tqdm
__a = 2048
__a = 4096
__a = 42
__a = os.environ.pop("PROCESS_TRAIN", "false")
__a = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def __snake_case( _lowerCAmelCase ) -> Any:
def choose_first(_lowerCAmelCase , _lowerCAmelCase=False ):
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
if len(_lowerCAmelCase ) == 1:
snake_case__ : Dict = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
snake_case__ : Any = {k: [a[k]] for k in a}
if len(a["""start_token"""] ) > 0:
break
return a
snake_case__ : Union[str, Any] = {"""id""": example["""id"""]}
snake_case__ : int = example["""annotations"""]
snake_case__ : Tuple = annotation["""yes_no_answer"""]
if 0 in yes_no_answer or 1 in yes_no_answer:
snake_case__ : Optional[int] = ["""yes"""] if 1 in yes_no_answer else ["""no"""]
snake_case__ : int = []
snake_case__ : Union[str, Any] = []
snake_case__ : Union[str, Any] = ["""<cls>"""]
else:
snake_case__ : str = ["""short"""]
snake_case__ : str = choose_first(annotation["""short_answers"""] )
if len(out["""start_token"""] ) == 0:
# answer will be long if short is not available
snake_case__ : Optional[int] = ["""long"""]
snake_case__ : Union[str, Any] = choose_first(annotation["""long_answer"""] , is_long_answer=_lowerCAmelCase )
snake_case__ : Optional[Any] = []
answer.update(_lowerCAmelCase )
# disregard some samples
if len(answer["""start_token"""] ) > 1 or answer["start_token"] == answer["end_token"]:
snake_case__ : Dict = True
else:
snake_case__ : List[str] = False
snake_case__ : Dict = ["""start_token""", """end_token""", """start_byte""", """end_byte""", """text"""]
if not all(isinstance(answer[k] , _lowerCAmelCase ) for k in cols ):
raise ValueError("""Issue in ID""" , example["""id"""] )
return answer
def __snake_case( _lowerCAmelCase , _lowerCAmelCase=False ) -> Union[str, Any]:
snake_case__ : Optional[Any] = _get_single_answer(_lowerCAmelCase )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
snake_case__ : Dict = example["""document"""]["""tokens"""]
snake_case__ : List[str] = []
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
return {
"context": " ".join(_lowerCAmelCase ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
snake_case__ : List[str] = ["""start_token""", """end_token"""]
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
snake_case__ : List[Any] = example["""document"""]["""tokens"""]
snake_case__ : str = answer["""start_token"""]
snake_case__ : str = answer["""end_token"""]
snake_case__ : Optional[Any] = []
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
snake_case__ : List[str] = """ """.join(context[start_token:end_token] )
# checking above code
if assertion:
snake_case__ : Optional[Any] = doc["""is_html"""][answer["""start_token"""] : answer["""end_token"""]]
snake_case__ : str = doc["""token"""][answer["""start_token"""] : answer["""end_token"""]]
snake_case__ : Any = """ """.join([old[i] for i in range(len(_lowerCAmelCase ) ) if not is_html[i]] )
if new != old:
print("""ID:""" , example["""id"""] )
print("""New:""" , _lowerCAmelCase , end="""\n""" )
print("""Old:""" , _lowerCAmelCase , end="""\n\n""" )
return {
"context": " ".join(_lowerCAmelCase ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
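# Added sketch of the index fix-up above: each dropped html token sitting
# before an answer boundary shifts that boundary left by one position.
_is_html = [False, True, False, False]
_start = 2  # boundary index in the original (html-included) token stream
_start_in_clean = _start - sum(_is_html[:_start])
assert _start_in_clean == 1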
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=2_048 , _lowerCAmelCase=4_096 , _lowerCAmelCase=True ) -> int:
# consecutive windows will overlap by doc_stride - q_len tokens
snake_case__ : str = get_context_and_ans(_lowerCAmelCase , assertion=_lowerCAmelCase )
snake_case__ : Tuple = out["""answer"""]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
snake_case__ : str = tokenizer(example["""question"""]["""text"""] , out["""context"""] ).input_ids
snake_case__ : Optional[Any] = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
snake_case__ : int = []
snake_case__ : List[str] = []
snake_case__ : Optional[int] = input_ids[:q_len]
snake_case__ : List[Any] = range(_lowerCAmelCase , len(_lowerCAmelCase ) , max_length - doc_stride )
for i in doc_start_indices:
snake_case__ : str = i + max_length - q_len
snake_case__ : Any = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer["""category"""][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(_lowerCAmelCase ),
"end_token": [-100] * len(_lowerCAmelCase ),
"category": category,
},
}
snake_case__ : Union[str, Any] = out["""context"""].split()
snake_case__ : List[Any] = splitted_context[answer["""end_token"""]]
snake_case__ : Optional[int] = len(
tokenizer(
""" """.join(splitted_context[: answer["""start_token"""]] ) , add_special_tokens=_lowerCAmelCase , ).input_ids )
snake_case__ : Union[str, Any] = len(
tokenizer(""" """.join(splitted_context[: answer["""end_token"""]] ) , add_special_tokens=_lowerCAmelCase ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
snake_case__ : str = len(tokenizer(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
snake_case__ : List[str] = input_ids[answer["""start_token"""] : answer["""end_token"""] + 1] # right & left are inclusive
snake_case__ : Dict = answer["""start_token"""]
snake_case__ : str = answer["""end_token"""]
if assertion:
snake_case__ : List[str] = tokenizer.decode(_lowerCAmelCase )
if answer["span"] != new:
print("""ISSUE IN TOKENIZATION""" )
print("""OLD:""" , answer["""span"""] )
print("""NEW:""" , _lowerCAmelCase , end="""\n\n""" )
if len(_lowerCAmelCase ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
snake_case__ : str = input_ids[:q_len]
snake_case__ : List[Any] = range(_lowerCAmelCase , len(_lowerCAmelCase ) , max_length - doc_stride )
snake_case__ : Union[str, Any] = []
snake_case__ : Dict = []
snake_case__ : Tuple = []
snake_case__ : Any = [] # null, yes, no, long, short
for i in doc_start_indices:
snake_case__ : Optional[int] = i + max_length - q_len
snake_case__ : Any = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
snake_case__ : int = start_token - i + q_len
snake_case__ : Optional[Any] = end_token - i + q_len
answers_category.append(answer["""category"""][0] ) # ["short"] -> "short"
else:
snake_case__ : List[Any] = -100
snake_case__ : Optional[Any] = -100
answers_category.append("""null""" )
snake_case__ : Any = inputs[-1][start_token : end_token + 1]
answers_start_token.append(_lowerCAmelCase )
answers_end_token.append(_lowerCAmelCase )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print("""ISSUE in strided for ID:""" , example["""id"""] )
print("""New:""" , tokenizer.decode(_lowerCAmelCase ) )
print("""Old:""" , tokenizer.decode(_lowerCAmelCase ) , end="""\n\n""" )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
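# Added sketch of the striding above: each window holds (max_length - q_len)
# context tokens and the start index advances by (max_length - doc_stride),
# so consecutive windows overlap by (doc_stride - q_len) tokens.
_q_len, _max_length, _doc_stride = 4, 10, 8
_ids = list(range(30))
_starts = list(range(_q_len, len(_ids), _max_length - _doc_stride))
_windows = [_ids[i : i + _max_length - _q_len] for i in _starts]
assert len(_windows[0]) == 6 and _windows[1][0] == 6  # overlap of 4 tokens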
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=2_048 , _lowerCAmelCase=4_096 , _lowerCAmelCase=False ) -> Optional[int]:
snake_case__ : Optional[Any] = get_strided_contexts_and_ans(
_lowerCAmelCase , _lowerCAmelCase , doc_stride=_lowerCAmelCase , max_length=_lowerCAmelCase , assertion=_lowerCAmelCase , )
return example
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
with jsonlines.open(_lowerCAmelCase , """a""" ) as writer:
for example in tqdm(_lowerCAmelCase , total=len(_lowerCAmelCase ) , desc="""Saving samples ... """ ):
snake_case__ : Tuple = example["""labels"""]
for ids, start, end, cat in zip(
example["""input_ids"""] , labels["""start_token"""] , labels["""end_token"""] , labels["""category"""] , ):
if start == -1 and end == -1:
continue # skip waste samples with no answer
if cat == "null" and np.random.rand() < 0.6:
continue # drop ~60 % of the null samples (rand() < 0.6)
writer.write(
{
"""input_ids""": ids,
"""start_token""": start,
"""end_token""": end,
"""category""": CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
__a = load_dataset("natural_questions")
__a = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
__a = data["train" if PROCESS_TRAIN == "true" else "validation"]
__a = {
"tokenizer": tokenizer,
"doc_stride": DOC_STRIDE,
"max_length": MAX_LENGTH,
"assertion": False,
}
__a = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
__a = data.remove_columns(["annotations", "document", "id", "question"])
print(data)
np.random.seed(SEED)
__a = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
save_to_disk(data, file_name=cache_file_name)
| 374 |
'''simple docstring'''
# flake8: noqa
# Lint as: python3
__a = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 374 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """backbone.""" if is_semantic else """"""
_SCREAMING_SNAKE_CASE = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", """beit.embeddings.cls_token"""),
(F"{prefix}patch_embed.proj.weight", """beit.embeddings.patch_embeddings.projection.weight"""),
(F"{prefix}patch_embed.proj.bias", """beit.embeddings.patch_embeddings.projection.bias"""),
(F"{prefix}pos_embed", """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False ) -> Union[str, Any]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
_SCREAMING_SNAKE_CASE = """backbone.""" if is_semantic else """"""
# queries, keys and values
_SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}blocks.{i}.attn.qkv.weight" )
_SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}blocks.{i}.attn.q_bias" )
_SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}blocks.{i}.attn.v_bias" )
_SCREAMING_SNAKE_CASE = in_proj_weight[
: config.hidden_size, :
]
_SCREAMING_SNAKE_CASE = q_bias
_SCREAMING_SNAKE_CASE = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_SCREAMING_SNAKE_CASE = in_proj_weight[
-config.hidden_size :, :
]
_SCREAMING_SNAKE_CASE = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
_SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}blocks.{i}.gamma_1" )
_SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}blocks.{i}.gamma_2" )
_SCREAMING_SNAKE_CASE = gamma_a
_SCREAMING_SNAKE_CASE = gamma_a
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = dct.pop(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = val
def lowerCAmelCase_ ( ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_SCREAMING_SNAKE_CASE = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = False if """rvlcdip""" in checkpoint_url else True
_SCREAMING_SNAKE_CASE = BeitConfig(use_absolute_position_embeddings=SCREAMING_SNAKE_CASE_ , use_mask_token=SCREAMING_SNAKE_CASE_ )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
_SCREAMING_SNAKE_CASE = 10_24
_SCREAMING_SNAKE_CASE = 40_96
_SCREAMING_SNAKE_CASE = 24
_SCREAMING_SNAKE_CASE = 16
# labels
if "rvlcdip" in checkpoint_url:
_SCREAMING_SNAKE_CASE = 16
_SCREAMING_SNAKE_CASE = """huggingface/label-files"""
_SCREAMING_SNAKE_CASE = """rvlcdip-id2label.json"""
_SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="""dataset""" ) , """r""" ) )
_SCREAMING_SNAKE_CASE = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
_SCREAMING_SNAKE_CASE = idalabel
_SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
_SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )["""model"""]
_SCREAMING_SNAKE_CASE = create_rename_keys(SCREAMING_SNAKE_CASE_ , has_lm_head=SCREAMING_SNAKE_CASE_ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , has_lm_head=SCREAMING_SNAKE_CASE_ )
# load HuggingFace model
_SCREAMING_SNAKE_CASE = BeitForMaskedImageModeling(SCREAMING_SNAKE_CASE_ ) if has_lm_head else BeitForImageClassification(SCREAMING_SNAKE_CASE_ )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check outputs on an image
_SCREAMING_SNAKE_CASE = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = prepare_img()
_SCREAMING_SNAKE_CASE = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" )
_SCREAMING_SNAKE_CASE = encoding["""pixel_values"""]
_SCREAMING_SNAKE_CASE = model(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = outputs.logits
# verify logits
_SCREAMING_SNAKE_CASE = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 1_96, 81_92]
assert logits.shape == torch.Size(SCREAMING_SNAKE_CASE_ ), "Shape of logits not as expected"
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
if has_lm_head:
_SCREAMING_SNAKE_CASE = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
else:
_SCREAMING_SNAKE_CASE = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
image_processor.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=SCREAMING_SNAKE_CASE_ , )
model.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=SCREAMING_SNAKE_CASE_ , )
if __name__ == "__main__":
UpperCamelCase__ : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
UpperCamelCase__ : Any = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 0 |
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config( model_name , num_frames ):
    """simple docstring"""
    text_config = XCLIPTextConfig()
    # derive patch size from model name
    start_idx = model_name.find("""patch""" )
    patch_size = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
    vision_config = XCLIPVisionConfig(patch_size=patch_size , num_frames=num_frames )
    if "large" in model_name:
        text_config.hidden_size = 7_68
        text_config.intermediate_size = 30_72
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 10_24
        vision_config.intermediate_size = 40_96
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 7_68
        vision_config.mit_intermediate_size = 30_72
    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 3_36
    config = XCLIPConfig.from_text_vision_configs(text_config , vision_config )
    if "large" in model_name:
        config.projection_dim = 7_68
    return config
def rename_key( name ):
    """simple docstring"""
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" )
    if name == "positional_embedding":
        name = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
    if "ln_1" in name:
        name = name.replace("""ln_1""" , """layer_norm1""" )
    if "ln_2" in name:
        name = name.replace("""ln_2""" , """layer_norm2""" )
    if "c_fc" in name:
        name = name.replace("""c_fc""" , """fc1""" )
    if "c_proj" in name:
        name = name.replace("""c_proj""" , """fc2""" )
    if name.startswith("""transformer.resblocks""" ):
        name = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" )
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("""attn.out_proj""" , """self_attn.out_proj""" )
    if "ln_final" in name:
        name = name.replace("""ln_final""" , """text_model.final_layer_norm""" )
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" )
    if name == "visual.positional_embedding":
        name = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" )
    if name.startswith("""visual.transformer.resblocks""" ):
        name = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" )
    if "visual.conv1" in name:
        name = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" )
    if "visual.ln_pre" in name:
        name = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" )
    if "visual.ln_post" in name:
        name = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" )
    if "visual.proj" in name:
        name = name.replace("""visual.proj""" , """visual_projection.weight""" )
    if "text_projection" in name:
        name = name.replace("""text_projection""" , """text_projection.weight""" )
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" )
    if "prompts_visual_ln" in name:
        name = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" )
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("""positional""" , """position""" )
    if name.startswith("""mit.resblocks""" ):
        name = name.replace("""mit.resblocks""" , """mit.encoder.layers""" )
    # prompts generator
    if name.startswith("""prompts_generator.norm""" ):
        name = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" )
    return name
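# Quick spot-check of the mapping above (illustrative key, not taken from the
# original script; rename_key is pure string rewriting, so this runs offline):
assert rename_key("""transformer.resblocks.0.attn.out_proj.weight""" ) == (
    """text_model.encoder.layers.0.self_attn.out_proj.weight"""
)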
def convert_state_dict( orig_state_dict , config ):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "attn.in_proj" in key:
            key_split = key.split(""".""" )
            if key.startswith("""visual""" ):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                # q, k and v are packed along dim 0 of in_proj; split into thirds
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("""mit""" ):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key )
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val
    return orig_state_dict
def prepare_video( num_frames ):
    """simple docstring"""
    if num_frames == 8:
        filename = """eating_spaghetti_8_frames.npy"""
    elif num_frames == 16:
        filename = """eating_spaghetti.npy"""
    elif num_frames == 32:
        filename = """eating_spaghetti_32_frames.npy"""
    else:
        # guard added so `filename` cannot be undefined for unsupported counts
        raise ValueError(F"num_frames {num_frames} is not supported, choose from 8, 16 or 32" )
    file = hf_hub_download(
        repo_id="""hf-internal-testing/spaghetti-video""" , filename=filename , repo_type="""dataset""" , )
    video = np.load(file )
    return list(video )
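# Usage note (hedged: requires network access to the Hugging Face Hub; the asset
# names are exactly the ones listed above):
#   frames = prepare_video(8)  # -> list of 8 numpy frames of the test clip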
def convert_xclip_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
"""simple docstring"""
    model_to_url = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32
    config = get_xclip_config(model_name , num_frames )
    model = XCLIPModel(config )
    model.eval()
    if "drive" in checkpoint_url:
        output = """pytorch_model.bin"""
        gdown.cached_download(checkpoint_url , output , quiet=False )
        state_dict = torch.load(output , map_location="""cpu""" )["""model"""]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url )["""model"""]
    state_dict = convert_state_dict(state_dict , config )
    model = XCLIPModel(config )
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()
    size = 3_36 if model_name == """xclip-large-patch14-16-frames""" else 2_24
    image_processor = VideoMAEImageProcessor(size=size )
    slow_tokenizer = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
    processor = XCLIPProcessor(image_processor=image_processor , tokenizer=fast_tokenizer )
    video = prepare_video(num_frames )
    inputs = processor(
        text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=video , return_tensors="""pt""" , padding=True )
    print("""Shape of pixel values:""" , inputs.pixel_values.shape )
    with torch.no_grad():
        outputs = model(**inputs )
    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1 )
    print("""Probs:""" , probs )
# kinetics-400
if model_name == "xclip-base-patch32":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] )
elif model_name == "xclip-base-patch16":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] )
elif model_name == "xclip-large-patch14":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] )
else:
raise ValueError(F"Model name {model_name} not supported" )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("""Pushing model, processor and slow tokenizer files to the hub...""" )
        model.push_to_hub(model_name , organization="""nielsr""" )
        processor.push_to_hub(model_name , organization="""nielsr""" )
        slow_tokenizer.push_to_hub(model_name , organization="""nielsr""" )
if __name__ == "__main__":
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
UpperCamelCase__ : str = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 0 | 1 |
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__lowerCAmelCase = True
except ImportError:
__lowerCAmelCase = False
__lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
def add_new_model_command_factory( args : Namespace ):
    """simple docstring"""
    return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class AddNewModelCommand(BaseTransformersCLICommand ):
    """simple docstring"""
    @staticmethod
    def register_subcommand( parser ):
        """simple docstring"""
        add_new_model_parser = parser.add_parser('add-new-model' )
        add_new_model_parser.add_argument('--testing' , action='store_true' , help='If in testing mode.' )
        add_new_model_parser.add_argument('--testing_file' , type=str , help='Configuration file on which to run.' )
        add_new_model_parser.add_argument(
            '--path' , type=str , help='Path to cookiecutter. Should only be used for testing purposes.' )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory )
    def __init__( self , testing , testing_file , path=None , *args ):
        """simple docstring"""
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run( self ):
"""simple docstring"""
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
        if len(directories ) > 0:
            raise ValueError(
                'Several directories starting with `cookiecutter-template-` in current working directory. '
                'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
                'change your working directory.' )
        path_to_transformer_root = (
            Path(__file__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / 'templates' / 'adding_a_new_model'
        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter ) )
        else:
            with open(self._testing_file , 'r' ) as configuration_file:
                testing_configuration = json.load(configuration_file )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) , no_input=True , extra_context=testing_configuration , )
        directory = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
        # Retrieve configuration
        with open(directory + '/configuration.json' , 'r' ) as configuration_file:
            configuration = json.load(configuration_file )
        lowercase_model_name = configuration['lowercase_modelname']
        generate_tensorflow_pytorch_and_flax = configuration['generate_tensorflow_pytorch_and_flax']
        os.remove(f'{directory}/configuration.json' )
        output_pytorch = 'PyTorch' in generate_tensorflow_pytorch_and_flax
        output_tensorflow = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
        output_flax = 'Flax' in generate_tensorflow_pytorch_and_flax
        model_dir = f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
        os.makedirs(model_dir , exist_ok=True )
        os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}' , exist_ok=True )
# Tests require submodules as they have parent imports
with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' , 'w' ):
pass
shutil.move(
f'{directory}/__init__.py' , f'{model_dir}/__init__.py' , )
shutil.move(
f'{directory}/configuration_{lowercase_model_name}.py' , f'{model_dir}/configuration_{lowercase_model_name}.py' , )
        def remove_copy_lines(path_to_datafile ):
            with open(path_to_datafile , 'r' ) as f:
                lines = f.readlines()
            with open(path_to_datafile , 'w' ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line )
if output_pytorch:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_{lowercase_model_name}.py' , f'{model_dir}/modeling_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_tf_{lowercase_model_name}.py' , f'{model_dir}/modeling_tf_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_tf_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_flax_{lowercase_model_name}.py' , f'{model_dir}/modeling_flax_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_flax_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/{lowercase_model_name}.md' , f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' , )
shutil.move(
f'{directory}/tokenization_{lowercase_model_name}.py' , f'{model_dir}/tokenization_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/tokenization_fast_{lowercase_model_name}.py' , f'{model_dir}/tokenization_{lowercase_model_name}_fast.py' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file , line_to_copy_below , lines_to_copy ):
            # Create temp file
            fh , abs_path = mkstemp()
            line_found = False
            with fdopen(fh , 'w' ) as new_file:
                with open(original_file ) as old_file:
                    for line in old_file:
                        new_file.write(line )
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy )
            if not line_found:
                raise ValueError(f'Line {line_to_copy_below} was not found in file.' )
            # Copy the file permissions from the old file to the new file
            copymode(original_file , abs_path )
            # Remove original file
            remove(original_file )
            # Move new file
            move(abs_path , original_file )
        def skip_units(line ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
        def replace_in_files(path_to_datafile ):
            with open(path_to_datafile ) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"' )[1]
                        skip_file = skip_units(line )
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"' )[1]
                        skip_snippet = skip_units(line )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in , line_to_copy_below , lines_to_copy )
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line )
            remove(path_to_datafile )
replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py' )
        os.rmdir(directory )
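# Example invocation (for reference only; as the warning in run() states, this
# command is deprecated in favour of `transformers-cli add-new-model-like`):
#   transformers-cli add-new-model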
| 536 |
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum ):
    """simple docstring"""
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS )
class TextGenerationPipeline(Pipeline ):
    """simple docstring"""
    XL_PREFIX = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
a_ = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix , **self._forward_params )
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters( self , return_full_text=None , return_tensors=None , return_text=None , return_type=None , clean_up_tokenization_spaces=None , prefix=None , handle_long_generation=None , stop_sequence=None , **generate_kwargs , ):
"""simple docstring"""
        preprocess_params = {}
        if prefix is not None:
            preprocess_params['prefix'] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix , padding=False , add_special_tokens=False , return_tensors=self.framework )
            generate_kwargs['prefix_length'] = prefix_inputs['input_ids'].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f'{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'
                    ' [None, \'hole\']' )
            preprocess_params['handle_long_generation'] = handle_long_generation
        preprocess_params.update(generate_kwargs )
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
            if return_tensors is not None:
                raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params['return_type'] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params['clean_up_tokenization_spaces'] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence , add_special_tokens=False )
            if len(stop_sequence_ids ) > 1:
                warnings.warn(
                    'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
                    ' the stop sequence will be used as the stop sequence string in the interim.' )
            generate_kwargs['eos_token_id'] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize( self , *args , **kwargs ):
        """simple docstring"""
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({'add_space_before_punct_symbol': True} )
        return super()._parse_and_tokenize(*args , **kwargs )
    def __call__( self , text_inputs , **kwargs ):
        """simple docstring"""
        return super().__call__(text_inputs , **kwargs )
def _a ( self , UpperCamelCase__ , UpperCamelCase__="" , UpperCamelCase__=None , **UpperCamelCase__ ):
"""simple docstring"""
a_ = self.tokenizer(
prefix + prompt_text , padding=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors=self.framework )
a_ = prompt_text
if handle_long_generation == "hole":
a_ = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
a_ = generate_kwargs['max_new_tokens']
else:
a_ = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
a_ = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
' models max length' )
a_ = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
a_ = inputs['attention_mask'][:, -keep_length:]
return inputs
    def _forward( self , model_inputs , **generate_kwargs ):
        """simple docstring"""
        input_ids = model_inputs['input_ids']
        attention_mask = model_inputs.get('attention_mask' , None )
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop('prefix_length' , 0 )
        if prefix_length > 0:
            has_max_new_tokens = 'max_new_tokens' in generate_kwargs or (
                'generation_config' in generate_kwargs
                and generate_kwargs['generation_config'].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs['max_length'] = generate_kwargs.get('max_length' ) or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = 'min_new_tokens' in generate_kwargs or (
                'generation_config' in generate_kwargs
                and generate_kwargs['generation_config'].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids , attention_mask=attention_mask , **generate_kwargs )
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b , out_b // in_b , *generated_sequence.shape[1:] )
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess( self , model_outputs , return_type=ReturnType.FULL_TEXT , clean_up_tokenization_spaces=True ):
        """simple docstring"""
        generated_sequence = model_outputs['generated_sequence'][0]
        input_ids = model_outputs['input_ids']
        prompt_text = model_outputs['prompt_text']
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {'generated_token_ids': sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , ) )
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {'generated_text': all_text}
            records.append(record )
        return records
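# Minimal usage sketch (hedged: assumes the standard `pipeline` factory and the
# public `gpt2` checkpoint, neither of which appears in this file):
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   generator("Hello, I'm a language model,", max_new_tokens=20)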
| 536 | 1 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
snake_case__ : Tuple = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    task_type: Optional[str] = field(
        default='''NER''' , metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    use_fast: bool = field(default=False , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} )
    labels: Optional[str] = field(
        default=None , metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''} , )
    max_seq_length: int = field(
        default=128 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
''' --overwrite_output_dir to overcome.''' )
    module = import_module('''tasks''' )
    try:
        token_classification_task_clazz = getattr(module , model_args.task_type )
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels )
    label_map: Dict[int, str] = dict(enumerate(labels ) )
    num_labels = len(labels )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label=label_map , label2id={label: i for i, label in enumerate(labels )} , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions : np.ndarray , label_ids : np.ndarray ) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions , axis=2 )
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size )]
        preds_list = [[] for _ in range(batch_size )]
        for i in range(batch_size ):
            for j in range(seq_len ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list
    def compute_metrics(p : EvalPrediction ) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(out_label_list , preds_list ),
            "precision": precision_score(out_label_list , preds_list ),
            "recall": recall_score(out_label_list , preds_list ),
            "f1": f1_score(out_label_list , preds_list ),
        }
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_process_zero():
            with open(output_eval_file , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''' , key , value )
                    writer.write('''%s = %s\n''' % (key, value) )
        results.update(result )
# Predict
if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
        predictions, label_ids, metrics = trainer.predict(test_dataset )
        preds_list, _ = align_predictions(predictions , label_ids )
        output_test_results_file = os.path.join(training_args.output_dir , '''test_results.txt''' )
        if trainer.is_world_process_zero():
            with open(output_test_results_file , '''w''' ) as writer:
                for key, value in metrics.items():
                    logger.info(''' %s = %s''' , key , value )
                    writer.write('''%s = %s\n''' % (key, value) )
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir , '''test_predictions.txt''' )
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file , '''w''' ) as writer:
                with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
                    token_classification_task.write_predictions_to_file(writer , f , preds_list )
return results
def _mp_fn(index ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
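# Typical invocation (illustrative script name and paths; `--model_name_or_path`,
# `--data_dir` and `--labels` come from the dataclasses above, the rest from
# TrainingArguments):
#   python run_ner.py --model_name_or_path bert-base-cased --data_dir ./data \
#       --labels ./data/labels.txt --output_dir ./out --do_train --do_eval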
| 637 |
"""simple docstring"""
def present_value(discount_rate : float , cash_flows : list[float] ) -> float:
    if discount_rate < 0:
        raise ValueError('''Discount rate cannot be negative''' )
    if not cash_flows:
        raise ValueError('''Cash flows list cannot be empty''' )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
    return round(present_value , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
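    # Minimal sanity check (not in the original module): two 100.0 cash flows
    # discounted at 10% give 100 / 1.1**0 + 100 / 1.1**1 = 190.91 after rounding.
    assert present_value(0.10 , [100.0, 100.0] ) == 190.91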
| 637 | 1 |
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
    @require_tpu
    def test_tpu( self ):
        distributed_args = f"\n        {self.test_dir}/xla_spawn.py\n        --num_cores 8\n        {self.test_file_path}\n        ".split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd , env=os.environ.copy() )
| 51 |
import os
def solution():
    with open(os.path.dirname(__file__ ) + '''/p022_names.txt''' ) as file:
        names = str(file.readlines()[0] )
        names = names.replace('''"''' , '''''' ).split(''',''' )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
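# Small illustration (the worked example from the Project Euler 22 statement,
# added here with a local helper; it is not part of the solution above):
def _name_value(name ):
    return sum(ord(letter ) - 64 for letter in name.upper() )
assert _name_value('''COLIN''' ) == 53 # 3 + 15 + 12 + 9 + 14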
| 114 | 0 |
'''simple docstring'''
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl , wt , w , n ):
    # greedy choice: sort items by value/weight ratio, descending
    r = sorted(zip(vl , wt ) , key=lambda x: x[0] / x[1] , reverse=True )
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt ) )
    k = bisect(acc , w )
    return (
        0
        if k == 0
        else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k] )
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
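    # Example (the classic fractional-knapsack instance from CLRS, added as an
    # illustration): capacity 50 takes items 1 and 2 whole plus 2/3 of item 3,
    # i.e. 60 + 100 + (20 / 30) * 120 = 240.0.
    assert frac_knapsack([60, 100, 120] , [10, 20, 30] , 50 , 3 ) == 240.0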
| 718 |
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset , expected_features ):
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def test_dataset_from_parquet_keep_in_memory(keep_in_memory , parquet_path , tmp_path ):
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_parquet_dataset(dataset , expected_features )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_dataset_from_parquet_features(features , parquet_path , tmp_path ):
    cache_dir = tmp_path / """cache"""
    default_expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path , features=features , cache_dir=cache_dir ).read()
    _check_parquet_dataset(dataset , expected_features )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def test_dataset_from_parquet_split(split , parquet_path , tmp_path ):
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    dataset = ParquetDatasetReader(parquet_path , cache_dir=cache_dir , split=split ).read()
    _check_parquet_dataset(dataset , expected_features )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def test_dataset_from_parquet_path_type(path_type , parquet_path , tmp_path ):
    if issubclass(path_type , str ):
        path = parquet_path
    elif issubclass(path_type , list ):
        path = [parquet_path]
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    dataset = ParquetDatasetReader(path , cache_dir=cache_dir ).read()
    _check_parquet_dataset(dataset , expected_features )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=("train",) ) -> List[Any]:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
for split in splits:
snake_case__ : Optional[int] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]:
snake_case__ : List[str] = tmp_path / """cache"""
snake_case__ : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case__ : Union[str, Any] = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read()
_check_parquet_datasetdict(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_parquet_datasetdict_reader_features(features , parquet_path , tmp_path ):
    cache_dir = tmp_path / """cache"""
    default_expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = ParquetDatasetReader({"""train""": parquet_path} , features=features , cache_dir=cache_dir ).read()
    _check_parquet_datasetdict(dataset , expected_features )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def test_parquet_datasetdict_reader_split(split , parquet_path , tmp_path ):
    if split:
        path = {split: parquet_path}
    else:
        split = """train"""
        path = {"""train""": parquet_path, """test""": parquet_path}
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    dataset = ParquetDatasetReader(path , cache_dir=cache_dir ).read()
    _check_parquet_datasetdict(dataset , expected_features , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
def test_parquet_write(dataset , tmp_path ):
    writer = ParquetDatasetWriter(dataset , tmp_path / """foo.parquet""" )
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / """foo.parquet""" )
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir , tmp_path ):
    image_path = str(shared_datadir / """test_image_rgb.jpg""" )
    data = {"""image""": [image_path]}
    features = Features({"""image""": Image()} )
    dataset = Dataset.from_dict(data , features=features )
    writer = ParquetDatasetWriter(dataset , tmp_path / """foo.parquet""" )
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=True ).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size(feature , expected ):
    assert get_writer_batch_size(feature ) == expected
| 301 | 0 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
    UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Any = VideoToVideoSDPipeline
lowerCamelCase__ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""}
lowerCamelCase__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""}
lowerCamelCase__ : List[str] = PipelineTesterMixin.required_optional_params - {"""latents"""}
lowerCamelCase__ : int = False
# No `output_type`.
lowerCamelCase__ : List[Any] = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
    def get_dummy_components( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=32 , attention_head_dim=4 , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        video = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''video''': video,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''pt''',
}
return inputs
def lowercase_ ( self ):
'''simple docstring'''
        device = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['''output_type'''] = '''np'''
        frames = sd_pipe(**inputs ).frames
        image_slice = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowercase_ ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_ , expected_max_diff=5E-3 )
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def lowercase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def lowercase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
def lowercase_ ( self ):
'''simple docstring'''
pass
def lowercase_ ( self ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def lowercase_ ( self ):
'''simple docstring'''
        pipe = VideoToVideoSDPipeline.from_pretrained('''cerspense/zeroscope_v2_XL''' , torch_dtype=torch.float16 )
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        video = torch.randn((1, 10, 3, 10_24, 5_76) , generator=generator )
        video = video.to('''cuda''' )
        prompt = '''Spiderman is surfing'''
        video_frames = pipe(prompt , video=video , generator=generator , num_inference_steps=3 , output_type='''pt''' ).frames
        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
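# Hedged usage sketch (my addition; assumes a CUDA machine and the diffusers API exercised above --
# the step count 25 is an arbitrary illustrative choice):
# pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
# frames = pipe("a prompt", video=video, num_inference_steps=25, output_type="np").frames  # video: (1, F, C, H, W)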
| 100 | import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """)
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """)
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """)
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """)
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """)
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records)
@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
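# Note (my addition): the Csv builder forwards `converters` to pandas.read_csv, so each configured
# column is parsed by the given callable before the Arrow conversion -- here "1 2 3" becomes [1, 2, 3].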
| 166 | 0 |
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    '''simple docstring'''
    # Count square laminae (square frames with a centred square hole) that use at most `limit` tiles.
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # The hole must have the same parity as the outer square.
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
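# A minimal brute-force cross-check for small limits (my own sketch, not part of the original
# solution): enumerate outer and hole widths directly and count laminae within the tile budget.
def _brute_force_solution(limit: int) -> int:
    count = 0
    for outer in range(3, limit // 4 + 2):
        hole = outer - 2  # largest hole first => smallest tile count first
        while hole >= 1 and outer * outer - hole * hole <= limit:
            count += 1
            hole -= 2
    return count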
if __name__ == "__main__":
print(F"{solution() = }") | 476 | from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, embedding_size=768, head_ratio=2, conv_kernel_size=9, num_groups=1, classifier_dropout=None, **kwargs):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]) | 476 | 1 |
'''simple docstring'''
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    return round(float(moles / volume) * nfactor)
def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (volume)))
def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (pressure)))
def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    return round(float((pressure * volume) / (0.0821 * moles)))
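# These are rearrangements of the ideal gas law PV = nRT with R ~= 0.0821 L*atm/(mol*K).
# Worked example (hypothetical values): 2 mol in a 5 L vessel at 300 K gives
# moles_to_pressure(5, 2, 300) == round((2 * 0.0821 * 300) / 5) == 10 (atm).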
if __name__ == "__main__":
import doctest
doctest.testmod()
| 69 |
"""simple docstring"""
import functools
from typing import Any
def word_break(string: str, words: list[str]) -> bool:
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError('the string should be not empty string')
    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words):
        raise ValueError('the words should be a list of non-empty strings')
    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = 'WORD_KEEPER'
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string)
    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True
        return False
    return is_breakable(0)
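# Example usage (hypothetical inputs):
# word_break("applepenapple", ["apple", "pen"])                   -> True  ("apple" + "pen" + "apple")
# word_break("catsandog", ["cats", "dog", "sand", "and", "cat"])  -> False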
if __name__ == "__main__":
import doctest
doctest.testmod()
| 549 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
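# `_import_structure` maps each submodule to the public names it exports; the `_LazyModule`
# constructed at the bottom of this file defers the heavy imports until first attribute access.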
_import_structure = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer'] = ['ReformerTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer_fast'] = ['ReformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_reformer'] = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 714 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
    def get_scheduler_config(self, **kwargs):
        """simple docstring"""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def full_loop(self, **config):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample
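    # `full_loop` drives one complete deterministic denoising pass; the numeric tests further
    # below compare its output sums/means against reference values recorded for this scheduler.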
    def test_timesteps(self):
        """simple docstring"""
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_steps_offset(self):
        """simple docstring"""
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))
    def test_betas(self):
        """simple docstring"""
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        """simple docstring"""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_prediction_type(self):
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_clip_sample(self):
        """simple docstring"""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_timestep_spacing(self):
        """simple docstring"""
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)
    def test_rescale_betas_zero_snr(self):
        """simple docstring"""
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)
    def test_thresholding(self):
        """simple docstring"""
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold)
    def test_time_indices(self):
        """simple docstring"""
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)
    def test_inference_steps(self):
        """simple docstring"""
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)
    def test_eta(self):
        """simple docstring"""
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        """simple docstring"""
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3
    def test_full_loop_with_v_prediction(self):
        """simple docstring"""
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3
    def test_full_loop_with_set_alpha_to_one(self):
        """simple docstring"""
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3
    def test_full_loop_with_no_set_alpha_to_one(self):
        """simple docstring"""
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3 | 628 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError('Model not supported, only supports base and large variants')
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace('encoder.mask_token', 'embeddings.mask_token')
    if "encoder.patch_embed.proj" in name:
        name = name.replace('encoder.patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "encoder.patch_embed.norm" in name:
        name = name.replace('encoder.patch_embed.norm', 'embeddings.norm')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if name == "encoder.norm.weight":
        name = 'layernorm.weight'
    if name == "encoder.norm.bias":
        name = 'layernorm.bias'
    if "decoder" in name:
        pass
    else:
        name = 'swin.' + name
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            # Split the fused qkv matrix into separate query/key/value projections.
            # (The exact target key names below are reconstructed from the standard Swin
            # conversion pattern; the original left-hand sides were lost in this dump.)
            if "weight" in key:
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'] = val[:dim, :]
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'] = val[
                    :dim
                ]
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
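# Example of the renaming performed above (hypothetical key):
# "encoder.layers.0.blocks.1.mlp.fc1.weight" -> "swin.encoder.layers.0.blocks.1.intermediate.dense.weight"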
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image_processor = ViTImageProcessor(size={'height': 192, 'width': 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors='pt')
    with torch.no_grad():
        outputs = model(**inputs)
    print(outputs.keys())
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'Saving image processor to {pytorch_dump_folder_path}')
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f'Pushing model and image processor for {model_name} to hub')
        model.push_to_hub(f'microsoft/{model_name}')
        image_processor.push_to_hub(f'microsoft/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub) | 8 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1
    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values
    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
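    # The basis functions above are the Bernstein polynomials
    # b_{i,n}(t) = C(n, i) * (1 - t)^(n - i) * t^i, which are non-negative and sum to 1
    # for every t in [0, 1] (hence the sanity assert in basis_function).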
    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore
        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x, to_plot_y, color='blue', label='Curve of Degree ' + str(self.degree))
        plt.scatter(x, y, color='red', label='Control Points')
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 105 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    '''configuration_blenderbot_small''': [
        '''BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''BlenderbotSmallConfig''',
        '''BlenderbotSmallOnnxConfig''',
    ],
    '''tokenization_blenderbot_small''': ['''BlenderbotSmallTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_blenderbot_small_fast'''] = ['''BlenderbotSmallTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_blenderbot_small'''] = [
'''BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotSmallForCausalLM''',
'''BlenderbotSmallForConditionalGeneration''',
'''BlenderbotSmallModel''',
'''BlenderbotSmallPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_blenderbot_small'''] = [
'''TFBlenderbotSmallForConditionalGeneration''',
'''TFBlenderbotSmallModel''',
'''TFBlenderbotSmallPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_blenderbot_small'''] = [
'''FlaxBlenderbotSmallForConditionalGeneration''',
'''FlaxBlenderbotSmallModel''',
'''FlaxBlenderbotSmallPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 721 |
"""simple docstring"""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"169M": 12,
"430M": 24,
"1B5": 24,
"3B": 32,
"7B": 32,
"14B": 40,
}
HIDEN_SIZE_MAPPING = {
"169M": 7_68,
"430M": 10_24,
"1B5": 20_48,
"3B": 25_60,
"7B": 40_96,
"14B": 51_20,
}
def convert_state_dict(state_dict):
    '''simple docstring'''
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("""emb."""):
            name = name.replace("""emb.""", """embeddings.""")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("""blocks.0.ln0"""):
            name = name.replace("""blocks.0.ln0""", """blocks.0.pre_ln""")
        # att -> attention
        name = re.sub(r"""blocks\.(\d+)\.att""", r"""blocks.\1.attention""", name)
        # ffn -> feed_forward
        name = re.sub(r"""blocks\.(\d+)\.ffn""", r"""blocks.\1.feed_forward""", name)
        # time_mix_k -> time_mix_key and reshape
        if name.endswith(""".time_mix_k"""):
            name = name.replace(""".time_mix_k""", """.time_mix_key""")
        # time_mix_v -> time_mix_value and reshape
        if name.endswith(""".time_mix_v"""):
            name = name.replace(""".time_mix_v""", """.time_mix_value""")
        # time_mix_r -> time_mix_receptance and reshape
        if name.endswith(""".time_mix_r"""):
            name = name.replace(""".time_mix_r""", """.time_mix_receptance""")
        if name != "head.weight":
            name = """rwkv.""" + name
        state_dict[name] = weight
    return state_dict
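# Example of the mapping above (hypothetical key):
# "blocks.3.att.time_mix_k" -> "rwkv.blocks.3.attention.time_mix_key"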
def convert_rmkv_checkpoint_to_hf_format(repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None):
    '''simple docstring'''
    # 1. Build the tokenizer
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""")
    config = RwkvConfig(
        vocab_size=vocab_size, num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size], hidden_size=HIDEN_SIZE_MAPPING[size])
    config.save_pretrained(output_dir)
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)
    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model.")
    shard_files = list(shards.keys())
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
    del state_dict
    gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
)
parser.add_argument(
"--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
)
parser.add_argument(
"--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
)
parser.add_argument(
"--tokenizer_file",
default=None,
type=str,
help="Path to the tokenizer file to use (if not provided, only the model is converted).",
)
parser.add_argument(
"--size",
default=None,
type=str,
help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Push to the Hub the converted model.",
)
parser.add_argument(
"--model_name",
default=None,
type=str,
help="Name of the pushed model on the Hub, including the username / organization.",
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 310 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 21 |
import math
def decimal_to_octal(num: int) -> str:
    '''simple docstring'''
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"
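# Worked example: 65 // 8 = 8 rem 1, 8 // 8 = 1 rem 0, 1 // 8 = 0 rem 1,
# so the octal digits read bottom-up are 1, 0, 1 and decimal_to_octal(65) == "0o101".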
def main() -> None:
    '''simple docstring'''
    print('\n2 in octal is:')
    print(decimal_to_octal(2))  # = 2
    print('\n8 in octal is:')
    print(decimal_to_octal(8))  # = 10
    print('\n65 in octal is:')
    print(decimal_to_octal(65))  # = 101
    print('\n216 in octal is:')
    print(decimal_to_octal(216))  # = 330
    print('\n512 in octal is:')
    print(decimal_to_octal(512))  # = 1000
    print('\n')
print('''\n''' )
if __name__ == "__main__":
main()
| 130 | 0 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = DatasetDict(
        {
            'train': dataset['train'].select(train_idxs),
            'validation': dataset['train'].select(valid_idxs),
            'test': dataset['validation'],
        })
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'])
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt')
# Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    test_dataloader = DataLoader(
        tokenized_datasets['test'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset('glue', 'mrpc')
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed)
    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets['train'].num_rows), datasets['train']['label'])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator, datasets, train_idxs, valid_idxs)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps)
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
                metric.add_batch(
                    predictions=predictions, references=references)
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'epoch {epoch}:', eval_metric)
        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print('Average test metrics from all folds:', test_metric)
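# The ensemble step above is soft voting: the per-fold test logits are summed (then averaged by
# `num_folds`) before the argmax, so the fold models are combined rather than scored separately.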
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.')
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    # New Code #
    parser.add_argument('--num_folds', type=int, default=3, help='The number of splits to perform across the dataset')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 642 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
_DESCRIPTION = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
_KWARGS_DESCRIPTION = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    '''simple docstring'''
    def _info(self) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'),
}) , )
    def _compute(self, references: List[List[List[str]]], predictions: List[List[str]], min_len: int = 1, max_len: int = 4) -> Dict[str, float]:
        """simple docstring"""
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len)
        }
| 642 | 1 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """simple docstring"""
    batch_size = np.inf
    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)
    _visit(features, set_batch_size)
    return None if batch_size is np.inf else batch_size
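# Smaller Parquet row groups keep memory bounded when rows embed large binary payloads
# (image/audio/binary columns); `_visit` walks the possibly-nested feature tree to spot them.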
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths, split=None, features=None, cache_dir=None, keep_in_memory=False, streaming=False, num_proc=None, **kwargs):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs)
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc)
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class ParquetDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, **parquet_writer_kwargs):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs
    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written
    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating parquet from Arrow format"):
            batch = query_table(
                table=self.dataset._data, key=slice(offset, offset + batch_size), indices=self.dataset._indices if self.dataset._indices is not None else None)
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
| 491 |
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)
def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
    """simple docstring"""
if donor_conc <= 0:
raise ValueError("Donor concentration should be positive" )
elif acceptor_conc <= 0:
raise ValueError("Acceptor concentration should be positive" )
elif intrinsic_conc <= 0:
raise ValueError("Intrinsic concentration should be positive" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"Donor concentration should be greater than intrinsic concentration" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"Acceptor concentration should be greater than intrinsic concentration" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
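    # Worked example (illustrative): for N_d = N_a = 1e17 cm^-3 and
    # n_i = 1e10 cm^-3 at T = 300 K, V_bi = (k*T/q) * ln(N_d * N_a / n_i**2)
    # evaluates to roughly 0.833 V.
    print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1e10))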
| 491 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        # Computes the expected (height, width) after aspect-ratio-preserving resizing.
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
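
# For instance (illustrative): a 30x40 (width x height) image with
# shortest_edge=18 is resized to width 18 and height int(18 * 40 / 30) = 24,
# so get_expected_values returns (24, 18) for that input.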
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 711 |
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)


@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 552 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
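    # With this lazy-module pattern, e.g. `from transformers.models.pix2struct
    # import Pix2StructConfig` only imports the configuration submodule on
    # first attribute access, keeping the top-level package import fast.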
| 87 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
    )
| 505 | 0 |
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
| 714 |
"""simple docstring"""
def selection_sort(collection):
    """Sort a mutable collection in place in ascending order and return it."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
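
# Example (illustrative): selection_sort([3, 1, 2]) returns [1, 2, 3]. The sort
# runs in place and performs O(n^2) comparisons regardless of input order.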
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(selection_sort(unsorted))
| 523 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)


@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 107 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 107 | 1 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
lowerCAmelCase__ =Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_ls = temp_rs
    temp_lr = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_ls = temp_rs
        temp_lr = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
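
# A minimal single-process reference of the same odd-even pass (an illustrative
# sketch, not part of the original file), handy for sanity-checking the
# multiprocessing version above.
def odd_even_transposition_sequential(arr: list) -> list:
    arr = list(arr)
    for phase in range(len(arr)):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr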
if __name__ == "__main__":
main()
| 713 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(self, image_size=224, num_channels=3, patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3], mlp_ratios=[8, 8, 4, 4], hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-6, layer_scale_init_value=1e-2, drop_path_rate=0.0, dropout_rate=0.0, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
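
# Example (illustrative): VanConfig() with the defaults above corresponds to a
# "van-base"-style layout, e.g.
# VanConfig(hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3]).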
| 690 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 637 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer, num_labels=1000, id2label=id2label, label2id=label2id,
    )
    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
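
# For example (illustrative): rename_key("stem.conv.weight") returns
# "bit.embedder.convolution.weight", and rename_key("head.fc.bias") returns
# "classifier.1.bias".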
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True, size={"shortest_edge": timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 632 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
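
# Illustrative sketch (not from the original file): a concrete command
# implements both hooks, e.g.
#
#     class HelloCommand(BaseTransformersCLICommand):
#         @staticmethod
#         def register_subcommand(parser: ArgumentParser):
#             hello_parser = parser.add_parser("hello")
#             hello_parser.set_defaults(func=lambda args: HelloCommand())
#
#         def run(self):
#             print("hello")
| 700 |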
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_convert_rgb=True):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }
    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        # Prepares a list of PIL images, or numpy arrays if numpify=True, or
        # PyTorch tensors if torchify=True.
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self) -> None:
        """simple docstring"""
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self , do_center_crop=True)

    @property
    def image_processor_dict(self) -> dict:
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing , 'do_resize'))
        self.assertTrue(hasattr(image_processing , 'size'))
        self.assertTrue(hasattr(image_processing , 'do_center_crop'))
        self.assertTrue(hasattr(image_processing , 'center_crop'))
        self.assertTrue(hasattr(image_processing , 'do_normalize'))
        self.assertTrue(hasattr(image_processing , 'image_mean'))
        self.assertTrue(hasattr(image_processing , 'image_std'))
        self.assertTrue(hasattr(image_processing , 'do_convert_rgb'))

    def test_image_processor_from_dict_with_kwargs(self) -> None:
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {'height': 2_2_4, 'width': 2_2_4})
        self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4)
        self.assertEqual(image_processor.size , {'shortest_edge': 4_2})
        self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4})

    def test_batch_feature(self) -> None:
        """simple docstring"""
        pass

    def test_call_pil(self) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

    def test_call_numpy(self) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False , numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

    def test_call_pytorch(self) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False , torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self) -> None:
        """simple docstring"""
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self) -> dict:
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing , 'do_resize'))
        self.assertTrue(hasattr(image_processing , 'size'))
        self.assertTrue(hasattr(image_processing , 'do_center_crop'))
        self.assertTrue(hasattr(image_processing , 'center_crop'))
        self.assertTrue(hasattr(image_processing , 'do_normalize'))
        self.assertTrue(hasattr(image_processing , 'image_mean'))
        self.assertTrue(hasattr(image_processing , 'image_std'))
        self.assertTrue(hasattr(image_processing , 'do_convert_rgb'))

    def test_batch_feature(self) -> None:
        """simple docstring"""
        pass

    def test_call_pil_four_channels(self) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
| 61 | 0 |
def is_contains_unique_chars(input_str):
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2 , ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
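
# Added illustration: the bitmap treats each character's code point as a bit
# index, so the whole check needs a single integer of state and runs in
# O(len(input_str)) time.
def _example_unique_chars():
    assert is_contains_unique_chars("abcdefg") is True   # every character distinct
    assert is_contains_unique_chars("abcabc") is False   # 'a' repeats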
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T] ):
    """simple docstring"""

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__( self , n ) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.' )
        else:
            LRUCache._MAX_CAPACITY = n

    def refer( self , x ) -> None:
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )

        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )

    def display( self ) -> None:
        for k in self.dq_store:
            print(k )

    def __repr__( self ) -> str:
        return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer("""A""")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("""A""")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
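
# Added note: `LRUCache.refer` pays O(n) for `deque.remove`; an OrderedDict
# gives O(1) recency updates. A self-contained sketch of the same access
# pattern as the demo above (illustrative only, not part of the original):
def _example_ordered_dict_lru(capacity=4):
    from collections import OrderedDict

    cache = OrderedDict()
    for key in ["A", 2, 3, "A", 4, 5]:
        if key in cache:
            cache.move_to_end(key)  # mark as most recently used
        cache[key] = None
        if len(cache) > capacity:
            cache.popitem(last=False)  # evict the least recently used key
    return list(reversed(cache))  # [5, 4, 'A', 3], matching the assert above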
| 23 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase : Union[str, Any] = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
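
# Added illustration: a toy version of the lazy-import idea used above. This is
# NOT the real `_LazyModule`; it only shows how attribute access can trigger
# the underlying import on demand.
import importlib
import types


class _ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # Import the submodule lazily, cache the attribute, and return it.
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)
        return value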
| 457 |
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
},
"tokenizer_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = T5Tokenizer

    prefix_tokens: List[int] = []

    def __init__( self , vocab_file=None , tokenizer_file=None , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=1_00 , additional_special_tokens=None , **kwargs , ) -> None:
        '''simple docstring'''
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F'<extra_id_{i}>' for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x : bool("extra_id_" in str(x ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens" )

        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self.extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length( pretrained_model_name_or_path , max_model_length , init_max_model_length ):
        '''simple docstring'''
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    F' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    F' {pretrained_model_name_or_path} automatically truncating your input to'
                    F' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
                    F' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value." , FutureWarning , )

        return max_model_length

    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
            logger.info(F'Copy vocab file to {out_vocab_file}' )

        return (out_vocab_file,)

    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos ) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]

    def get_sentinel_tokens( self ) -> List[str]:
        '''simple docstring'''
        return list(
            set(filter(lambda token : bool(re.search(R"<extra_id_\d+>" , token ) ) is not None , self.additional_special_tokens ) ) )

    def get_sentinel_token_ids( self ) -> List[int]:
        '''simple docstring'''
        return [self.convert_tokens_to_ids(token ) for token in self.get_sentinel_tokens()]
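
# Usage sketch (added): sentinel tokens mark masked spans for T5's denoising
# objective. Assuming a checkpoint such as "t5-small" is reachable, the two
# helpers above return the "<extra_id_N>" strings and their vocabulary ids.
def _example_sentinel_tokens():
    tokenizer = T5TokenizerFast.from_pretrained("t5-small")
    tokens = tokenizer.get_sentinel_tokens()  # e.g. ["<extra_id_0>", "<extra_id_1>", ...]
    ids = tokenizer.get_sentinel_token_ids()  # the matching vocabulary ids
    return tokens, ids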
| 457 | 1 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class EfficientNetConfig(PretrainedConfig ):
    '''simple docstring'''

    model_type = "efficientnet"

    def __init__( self , num_channels = 3 , image_size = 600 , width_coefficient = 2.0 , depth_coefficient = 3.1 , depth_divisor = 8 , kernel_sizes = [3, 3, 5, 3, 5, 5, 3] , in_channels = [32, 16, 24, 40, 80, 112, 192] , out_channels = [16, 24, 40, 80, 112, 192, 320] , depthwise_padding = [] , strides = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats = [1, 2, 2, 3, 3, 4, 1] , expand_ratios = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio = 0.25 , hidden_act = "swish" , hidden_dim = 2560 , pooling_type = "mean" , initializer_range = 0.02 , batch_norm_eps = 0.001 , batch_norm_momentum = 0.99 , dropout_rate = 0.5 , drop_connect_rate = 0.2 , **kwargs , ):
        super().__init__(**kwargs )

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats ) * 4


class EfficientNetOnnxConfig(OnnxConfig ):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def atol_for_validation( self ) -> float:
        return 1E-5
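
# Usage sketch (added): the defaults above describe the largest (b7-style)
# scale; any keyword argument overrides them. `num_hidden_layers` is derived
# from `num_block_repeats`, not passed in.
def _example_efficientnet_config():
    config = EfficientNetConfig(image_size=224, dropout_rate=0.3)
    return config.num_hidden_layers  # sum([1, 2, 2, 3, 3, 4, 1]) * 4 == 64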
| 84 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
},
'tokenizer_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/fnet-base': 512,
'google/fnet-large': 512,
}
SPIECE_UNDERLINE = '▁'
class FNetTokenizerFast(PreTrainedTokenizerFast ):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=True , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )

        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )

        return (out_vocab_file,)
| 31 | 0 |
class Graph:
    '''simple docstring'''

    def __init__(self ):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self , vertex ):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self , head , tail , weight ):
        self.add_vertex(head )
        self.add_vertex(tail )
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self ):
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight) )
        for i in range(len(edges ) ):
            edges[i] = list(edges[i] )

        edges.sort(key=lambda e : e[2] )
        for i in range(len(edges ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self ):
        string = ''''''
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += F"""{head} -> {tail} == {weight}\n"""
        return string.rstrip('''\n''' )

    def get_edges(self ):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output

    def get_vertices(self ):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None , edges=None ):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex )
        for edge in edges:
            g.add_edge(*edge )
        return g

    class UnionFind:
        '''simple docstring'''

        def __init__(self ):
            self.parent = {}
            self.rank = {}

        def __len__(self ):
            return len(self.parent )

        def make_set(self , item ):
            if item in self.parent:
                return self.find(item )
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self , item ):
            if item not in self.parent:
                return self.make_set(item )
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item] )
            return self.parent[item]

        def union(self , item1 , item2 ):
            root1 = self.find(item1 )
            root2 = self.find(item2 )

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph ):
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight) )
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head )
                set2 = union_find.find(tail )
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head ) != union_find.find(tail ):
                        union_find.union(head , tail )
                        mst_edges.append(cheap_edge[vertex] )
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges )
        return mst
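
# Usage sketch (added): build a small weighted graph and extract its minimum
# spanning tree. `distinct_weight` is called first because Borůvka's algorithm
# assumes all edge weights are distinct.
def _example_boruvka_mst():
    g = Graph.build(edges=[["A", "B", 1], ["B", "C", 2], ["A", "C", 3]])
    g.distinct_weight()
    mst = Graph.boruvka_mst(g)
    return str(mst)  # contains the two MST edges A-B and B-C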
| 706 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 1_6
EVAL_BATCH_SIZE = 3_2
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase = 16 ) -> Optional[int]:
snake_case__ = AutoTokenizer.from_pretrained('''bert-base-cased''' )
snake_case__ = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__lowerCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
snake_case__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case__ = datasets.map(
__lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case__ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__lowerCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
snake_case__ = 16
elif accelerator.mixed_precision != "no":
snake_case__ = 8
else:
snake_case__ = None
return tokenizer.pad(
__lowerCAmelCase , padding='''longest''' , max_length=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
snake_case__ = DataLoader(
tokenized_datasets['''train'''] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
snake_case__ = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __lowerCAmelCase ) == "1":
snake_case__ = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
snake_case__ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
snake_case__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case__ = config['''lr''']
snake_case__ = int(config['''num_epochs'''] )
snake_case__ = int(config['''seed'''] )
snake_case__ = int(config['''batch_size'''] )
set_seed(__lowerCAmelCase )
snake_case__ , snake_case__ = get_dataloaders(__lowerCAmelCase , __lowerCAmelCase )
snake_case__ = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
snake_case__ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
snake_case__ = batch_size // MAX_GPU_BATCH_SIZE
snake_case__ = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case__ = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__lowerCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case__ = model.to(accelerator.device )
# Instantiate optimizer
snake_case__ = AdamW(params=model.parameters() , lr=__lowerCAmelCase )
# Instantiate scheduler
snake_case__ = get_linear_schedule_with_warmup(
optimizer=__lowerCAmelCase , num_warmup_steps=100 , num_training_steps=(len(__lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = accelerator.prepare(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
snake_case__ = os.path.split(__lowerCAmelCase )[-1].split('''.''' )[0]
accelerator.init_trackers(__lowerCAmelCase , __lowerCAmelCase )
# Now we train the model
for epoch in range(__lowerCAmelCase ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
snake_case__ = 0
for step, batch in enumerate(__lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
snake_case__ = model(**__lowerCAmelCase )
snake_case__ = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
snake_case__ = loss / gradient_accumulation_steps
accelerator.backward(__lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
snake_case__ = model(**__lowerCAmelCase )
snake_case__ = outputs.logits.argmax(dim=-1 )
snake_case__ , snake_case__ = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__lowerCAmelCase , references=__lowerCAmelCase , )
snake_case__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , __lowerCAmelCase )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'''accuracy''': eval_metric['''accuracy'''],
'''f1''': eval_metric['''f1'''],
'''train_loss''': total_loss.item() / len(__lowerCAmelCase ),
'''epoch''': epoch,
} , step=__lowerCAmelCase , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def SCREAMING_SNAKE_CASE ( ) -> Dict:
snake_case__ = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__lowerCAmelCase , default=__lowerCAmelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
'''--project_dir''' , type=__lowerCAmelCase , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , )
snake_case__ = parser.parse_args()
snake_case__ = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__lowerCAmelCase , __lowerCAmelCase )
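
# Launch sketch (added): besides `python script.py` / `accelerate launch`, the
# training function above can be started from Python. `notebook_launcher` is
# part of `accelerate`; the argument plumbing below is illustrative only.
def _example_notebook_launch():
    from accelerate import notebook_launcher

    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    args = argparse.Namespace(cpu=True, mixed_precision="no", with_tracking=False, project_dir="logs")
    notebook_launcher(training_function, (config, args), num_processes=1)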
if __name__ == "__main__":
main()
| 208 | 0 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""

    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp( self ):
        '''simple docstring'''
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=False )
        self.tokenizer = tokenizer

    def test_convert_token_and_id( self ):
        '''simple docstring'''
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )

    def test_get_vocab( self ):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , "<s>" )
        self.assertEqual(vocab_keys[1] , "<pad>" )
        self.assertEqual(vocab_keys[-1] , "<mask>" )
        self.assertEqual(len(vocab_keys ) , 101122 )

    def test_vocab_size( self ):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size , 101122 )

    @require_torch
    def test_prepare_batch( self ):
        '''simple docstring'''
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text , max_length=len(expected_src_tokens ) , padding=True , truncation=True , return_tensors="pt" )
        self.assertIsInstance(batch , BatchEncoding )

        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens , result )

    def test_rust_and_python_full_tokenizers( self ):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )

        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )

    @slow
    def test_tokenizer_integration( self ):
        '''simple docstring'''
        expected_encoding = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=sequences , )
| 551 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL" ) -> str:
    url = F'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
    soup = BeautifulSoup(requests.get(url ).text , "html.parser" )
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div" , class_=class_ ).find("span" ).text
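
# Added caveat: the CSS class above is an internal Yahoo detail and breaks when
# the page layout changes, so guard the lookup. The wrapper below is
# illustrative only, not part of the original utility.
def _example_safe_stock_price(symbol="AAPL"):
    try:
        return stock_price(symbol)
    except AttributeError:  # soup.find(...) returned None
        return "N/A"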
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 551 | 1 |
from math import sqrt
def _UpperCamelCase (a__ :int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(sqrt(a__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _UpperCamelCase (a__ :int = 1_0001 ):
"""simple docstring"""
UpperCamelCase__ = 0
UpperCamelCase__ = 1
while count != nth and number < 3:
number += 1
if is_prime(a__ ):
count += 1
while count != nth:
number += 2
if is_prime(a__ ):
count += 1
return number
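
# Quick check (added): the helper agrees with the first primes, and the
# default call returns the 10001st prime (104743).
def _example_first_primes():
    return [solution(n) for n in (1, 2, 3, 6)]  # [2, 3, 5, 13]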
if __name__ == "__main__":
print(f"""{solution() = }""")
| 548 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(function: str , starting_point: complex , variable: str = "x" , precision: float = 10**-10 , multiplicity: int = 1 , ):
    """simple docstring"""
    x = symbols(variable )
    func = lambdify(x , function )
    diff_function = lambdify(x , diff(function , x ) )

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess ) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess ) / diff_function(
                prev_guess )
        else:
            raise ZeroDivisionError("""Could not find root""" ) from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess ) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f"""{newton_raphson('exp(x) - 1', 10, precision=0.0_05)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 548 | 1 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""):
raise Exception("""requires fairseq >= 1.0.0a""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = """Hello world! cécé herlolip"""
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path , pytorch_dump_folder_path , classification_head ):
    '''simple docstring'''
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path )
    roberta.eval() # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0]

    print("""Our RoBERTa config:""" , config )

    model = XLMRobertaXLForSequenceClassification(config ) if classification_head else XLMRobertaXLForMaskedLM(config )
    model.eval()
# Now let's copy all the weights.
# Embeddings
__lowercase = roberta_sent_encoder.embed_tokens.weight
__lowercase = roberta_sent_encoder.embed_positions.weight
__lowercase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
__lowercase = roberta_sent_encoder.layer_norm.weight
__lowercase = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__lowercase = model.roberta.encoder.layer[i]
__lowercase = roberta_sent_encoder.layers[i]
__lowercase = layer.attention
__lowercase = roberta_layer.self_attn_layer_norm.weight
__lowercase = roberta_layer.self_attn_layer_norm.bias
# self attention
__lowercase = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
__lowercase = roberta_layer.self_attn.q_proj.weight
__lowercase = roberta_layer.self_attn.q_proj.bias
__lowercase = roberta_layer.self_attn.k_proj.weight
__lowercase = roberta_layer.self_attn.k_proj.bias
__lowercase = roberta_layer.self_attn.v_proj.weight
__lowercase = roberta_layer.self_attn.v_proj.bias
# self-attention output
__lowercase = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
__lowercase = roberta_layer.self_attn.out_proj.weight
__lowercase = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
__lowercase = roberta_layer.final_layer_norm.weight
__lowercase = roberta_layer.final_layer_norm.bias
# intermediate
__lowercase = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
__lowercase = roberta_layer.fca.weight
__lowercase = roberta_layer.fca.bias
# output
__lowercase = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
__lowercase = roberta_layer.fca.weight
__lowercase = roberta_layer.fca.bias
# end of layer
if classification_head:
__lowercase = roberta.model.classification_heads["""mnli"""].dense.weight
__lowercase = roberta.model.classification_heads["""mnli"""].dense.bias
__lowercase = roberta.model.classification_heads["""mnli"""].out_proj.weight
__lowercase = roberta.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
__lowercase = roberta.model.encoder.lm_head.dense.weight
__lowercase = roberta.model.encoder.lm_head.dense.bias
__lowercase = roberta.model.encoder.lm_head.layer_norm.weight
__lowercase = roberta.model.encoder.lm_head.layer_norm.bias
__lowercase = roberta.model.encoder.lm_head.weight
__lowercase = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids = roberta.encode(SAMPLE_TEXT ).unsqueeze(0 ) # batch of size 1

    our_output = model(input_ids )[0]
    if classification_head:
        their_output = roberta.model.classification_heads["""mnli"""](roberta.extract_features(input_ids ) )
    else:
        their_output = roberta.model(input_ids )[0]
    print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1e-3 )
    print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
    if not success:
        raise Exception("""Something went wRoNg""" )

    pathlib.Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(F'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 80 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684 | 0 |
'''simple docstring'''
def move_tower(height , from_pole , to_pole , with_pole ):
    if height >= 1:
        move_tower(height - 1 , from_pole , with_pole , to_pole )
        move_disk(from_pole , to_pole )
        move_tower(height - 1 , with_pole , to_pole , from_pole )


def move_disk(fp , tp ):
    print("""moving disk from""" , fp , """to""" , tp )


def main():
    height = int(input("""Height of hanoi: """ ).strip() )
    move_tower(height , """A""" , """B""" , """C""" )
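
# Worked note (added): moving a tower of height n always takes 2**n - 1 disk
# moves; the counter below mirrors the recursion above to verify it.
def count_moves(height):
    if height < 1:
        return 0
    return 2 * count_moves(height - 1) + 1  # two sub-towers plus one disk; 7 for height 3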
if __name__ == "__main__":
    main()
| 113 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_flax_xlm_roberta_base(self ):
        model = FlaxXLMRobertaModel.from_pretrained("""xlm-roberta-base""")
        tokenizer = AutoTokenizer.from_pretrained("""xlm-roberta-base""")
        text = """The dog is cute and lives in the garden house"""
        input_ids = jnp.array([tokenizer.encode(text )])
        expected_output_shape = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])
        output = model(input_ids )["""last_hidden_state"""]
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3))
| 113 | 1 |
"""simple docstring"""
def is_pentagonal(n: int) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1 , limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i , len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b

    return -1
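
# Check sketch (added): `is_pentagonal` inverts P(n) = n * (3n - 1) / 2, so
# for example P(4) = 22 passes while 23 fails.
def _example_pentagonal_checks():
    return is_pentagonal(22), is_pentagonal(23)  # (True, False)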
if __name__ == "__main__":
print(F"""{solution() = }""")
| 677 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list )
    handles: list = field(default_factory=list )

    def _forward_hook( self , m , inputs: Tensor , outputs: Tensor ):
        '''simple docstring'''
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )

    def __call__( self , x: Tensor ):
        '''simple docstring'''
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(x )
        [h.remove() for h in self.handles]
        return self

    @property
    def parametrized( self ):
        '''simple docstring'''
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda m : len(list(m.state_dict().keys() ) ) > 0 , self.traced ) )


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list )
    dest_skip: List = field(default_factory=list )

    def __call__( self , x: Tensor ):
        '''simple docstring'''
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized

        src_traced = list(filter(lambda m : type(m ) not in self.src_skip , src_traced ) )
        dest_traced = list(filter(lambda m : type(m ) not in self.dest_skip , dest_traced ) )

        if len(dest_traced ) != len(src_traced ):
            raise Exception(
                f"""Numbers of operations are different. Source module has {len(src_traced )} operations while"""
                f""" destination module has {len(dest_traced )}.""" )

        for dest_m, src_m in zip(dest_traced , src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f"""Transfered from={src_m} to={dest_m}""" )


def convert_weight_and_push(name: str , config: ResNetConfig , save_directory: Path , push_to_hub: bool = True ):
    print(f"""Converting {name}...""")
    with torch.no_grad():
        from_model = timm.create_model(name , pretrained=True ).eval()
        our_model = ResNetForImageClassification(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model )
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x )

    assert torch.allclose(from_model(x ) , our_model(x ).logits ), "The model logits don't match the original one."

    checkpoint_name = f"""resnet{'-'.join(name.split('resnet' ) )}"""
    print(checkpoint_name )

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=True , )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''')
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=True , )

        print(f"""Pushed {checkpoint_name}""")
def lowerCamelCase (a_ :Path , a_ :str = None , a_ :bool = True) -> int:
lowercase :Optional[Any] = '''imagenet-1k-id2label.json'''
lowercase :Union[str, Any] = 1000
lowercase :Any = (1, num_labels)
lowercase :Tuple = '''huggingface/label-files'''
lowercase :List[str] = num_labels
lowercase :Union[str, Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r'''))
lowercase :Any = {int(a_): v for k, v in idalabel.items()}
lowercase :str = idalabel
lowercase :Any = {v: k for k, v in idalabel.items()}
lowercase :Union[str, Any] = partial(a_ , num_labels=a_ , idalabel=a_ , labelaid=a_)
lowercase :Optional[int] = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic'''),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck'''),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic'''),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck'''),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck'''),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck'''),
}
    if model_name:
        convert_weight_and_push(model_name , names_to_config[model_name] , save_directory , push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name , config , save_directory , push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
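# --- Illustrative sketch (not part of the original script) ---
# Tracker/ModuleTransfer above copy weights by hooking every leaf module,
# recording modules in execution order, and zipping the two traces together.
# A minimal standalone demo of the same idea, assuming only torch is installed:
def _demo_module_transfer():
    from torch import nn
    src = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    dest = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    # identical architectures, so the leaf modules line up one-to-one
    for d, s in zip(dest.modules(), src.modules()):
        if len(list(d.children())) == 0:
            d.load_state_dict(s.state_dict())
    x = torch.randn(1, 3, 32, 32)
    assert torch.allclose(src(x), dest(x))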
| 677 | 1 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
__magic_name__ = logging.getLogger(__name__)
def accuracy(out , labels ):
    '''simple docstring'''
    outputs = np.argmax(out , axis=1 )
    return np.sum(outputs == labels )
def load_rocstories_dataset(dataset_path ):
    '''simple docstring'''
    with open(dataset_path , encoding='''utf_8''' ) as f:
        f = csv.reader(f )
        output = []
        next(f ) # skip the first line
        for line in tqdm(f ):
            output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output
def pre_process_datasets(encoded_datasets , input_len , cap_length , start_token , delimiter_token , clf_token ):
    '''simple docstring'''
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset )
        input_ids = np.zeros((n_batch, 2, input_len) , dtype=np.int64 )
        mc_token_ids = np.zeros((n_batch, 2) , dtype=np.int64 )
        lm_labels = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.int64 )
        mc_labels = np.zeros((n_batch,) , dtype=np.int64 )
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset ):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1 )] = with_cont1
            input_ids[i, 1, : len(with_cont2 )] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1 ) - 1
            mc_token_ids[i, 1] = len(with_cont2 ) - 1
            lm_labels[i, 0, : len(with_cont1 )] = with_cont1
            lm_labels[i, 1, : len(with_cont2 )] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t ) for t in all_inputs ) )
    return tensor_datasets
def main():
'''simple docstring'''
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=A__ , default='''openai-gpt''' , help='''pretrained model name''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' )
parser.add_argument(
'''--output_dir''' , default=A__ , type=A__ , required=A__ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=A__ , default='''''' )
parser.add_argument('''--eval_dataset''' , type=A__ , default='''''' )
parser.add_argument('''--seed''' , type=A__ , default=42 )
parser.add_argument('''--num_train_epochs''' , type=A__ , default=3 )
parser.add_argument('''--train_batch_size''' , type=A__ , default=8 )
parser.add_argument('''--eval_batch_size''' , type=A__ , default=16 )
parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=A__ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , type=A__ , default=1 )
parser.add_argument(
'''--max_steps''' , default=-1 , type=A__ , help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=A__ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=A__ , default=6.2_5E-5 )
parser.add_argument('''--warmup_steps''' , default=0 , type=A__ , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--lr_schedule''' , type=A__ , default='''warmup_linear''' )
parser.add_argument('''--weight_decay''' , type=A__ , default=0.01 )
parser.add_argument('''--lm_coef''' , type=A__ , default=0.9 )
parser.add_argument('''--n_valid''' , type=A__ , default=374 )
parser.add_argument('''--server_ip''' , type=A__ , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=A__ , default='''''' , help='''Can be used for distant debugging.''' )
UpperCAmelCase = parser.parse_args()
print(A__ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=A__ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
UpperCAmelCase = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
UpperCAmelCase = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(A__ , A__ ) )
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
UpperCAmelCase = ['''_start_''', '''_delimiter_''', '''_classify_''']
UpperCAmelCase = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(A__ )
UpperCAmelCase = tokenizer.convert_tokens_to_ids(A__ )
UpperCAmelCase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(A__ ) )
model.to(A__ )
# Load and encode the datasets
    def tokenize_and_encode(obj ):
        if isinstance(obj , str ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj ) )
        elif isinstance(obj , int ):
            return obj
        return [tokenize_and_encode(o ) for o in obj]
logger.info('''Encoding dataset...''' )
UpperCAmelCase = load_rocstories_dataset(args.train_dataset )
UpperCAmelCase = load_rocstories_dataset(args.eval_dataset )
UpperCAmelCase = (train_dataset, eval_dataset)
UpperCAmelCase = tokenize_and_encode(A__ )
# Compute the max input length for the Transformer
UpperCAmelCase = model.config.n_positions // 2 - 2
UpperCAmelCase = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
UpperCAmelCase = min(A__ , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
UpperCAmelCase = pre_process_datasets(A__ , A__ , A__ , *A__ )
UpperCAmelCase , UpperCAmelCase = tensor_datasets[0], tensor_datasets[1]
UpperCAmelCase = TensorDataset(*A__ )
UpperCAmelCase = RandomSampler(A__ )
UpperCAmelCase = DataLoader(A__ , sampler=A__ , batch_size=args.train_batch_size )
UpperCAmelCase = TensorDataset(*A__ )
UpperCAmelCase = SequentialSampler(A__ )
UpperCAmelCase = DataLoader(A__ , sampler=A__ , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
UpperCAmelCase = args.max_steps
UpperCAmelCase = args.max_steps // (len(A__ ) // args.gradient_accumulation_steps) + 1
else:
UpperCAmelCase = len(A__ ) // args.gradient_accumulation_steps * args.num_train_epochs
UpperCAmelCase = list(model.named_parameters() )
UpperCAmelCase = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
UpperCAmelCase = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0},
]
UpperCAmelCase = AdamW(A__ , lr=args.learning_rate , eps=args.adam_epsilon )
UpperCAmelCase = get_linear_schedule_with_warmup(
A__ , num_warmup_steps=args.warmup_steps , num_training_steps=A__ )
if args.do_train:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ):
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = tqdm(A__ , desc='''Training''' )
for step, batch in enumerate(A__ ):
UpperCAmelCase = tuple(t.to(A__ ) for t in batch )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = batch
UpperCAmelCase = model(A__ , mc_token_ids=A__ , lm_labels=A__ , mc_labels=A__ )
UpperCAmelCase = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
UpperCAmelCase = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
UpperCAmelCase = '''Training loss: {:.2e} lr: {:.2e}'''.format(A__ , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
UpperCAmelCase = model.module if hasattr(A__ , '''module''' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
UpperCAmelCase = os.path.join(args.output_dir , A__ )
UpperCAmelCase = os.path.join(args.output_dir , A__ )
torch.save(model_to_save.state_dict() , A__ )
model_to_save.config.to_json_file(A__ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
UpperCAmelCase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
UpperCAmelCase = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(A__ )
if args.do_eval:
model.eval()
UpperCAmelCase , UpperCAmelCase = 0, 0
UpperCAmelCase , UpperCAmelCase = 0, 0
for batch in tqdm(A__ , desc='''Evaluating''' ):
UpperCAmelCase = tuple(t.to(A__ ) for t in batch )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = batch
with torch.no_grad():
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = model(
A__ , mc_token_ids=A__ , lm_labels=A__ , mc_labels=A__ )
UpperCAmelCase = mc_logits.detach().cpu().numpy()
UpperCAmelCase = mc_labels.to('''cpu''' ).numpy()
UpperCAmelCase = accuracy(A__ , A__ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
UpperCAmelCase = eval_loss / nb_eval_steps
UpperCAmelCase = eval_accuracy / nb_eval_examples
UpperCAmelCase = tr_loss / nb_tr_steps if args.do_train else None
UpperCAmelCase = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
UpperCAmelCase = os.path.join(args.output_dir , '''eval_results.txt''' )
with open(A__ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , A__ , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
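# --- Illustrative sketch (not part of the original script) ---
# pre_process_datasets packs each story with its two candidate continuations into
# a (n_batch, 2, input_len) tensor; mc_token_ids marks the position of the
# classification token for each choice. A tiny hypothetical example:
def _demo_input_packing():
    story, cont1, cont2 = [10, 11], [20], [21, 22]
    start, delim, clf = 1, 2, 3
    with_cont1 = [start] + story + [delim] + cont1 + [clf]
    with_cont2 = [start] + story + [delim] + cont2 + [clf]
    input_len = max(len(with_cont1), len(with_cont2))
    input_ids = np.zeros((1, 2, input_len), dtype=np.int64)
    input_ids[0, 0, : len(with_cont1)] = with_cont1
    input_ids[0, 1, : len(with_cont2)] = with_cont2
    # the multiple-choice head reads the hidden state at the last (clf) token
    mc_token_ids = np.array([[len(with_cont1) - 1, len(with_cont2) - 1]])
    return input_ids, mc_token_ids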
| 704 |
import re
import string
import numpy as np
import datasets
__magic_name__ = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
__magic_name__ = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
__magic_name__ = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch ( datasets.Metric ):
'''simple docstring'''
    def _info( self ) -> Union[str, Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , reference_urls=[] , )
    def _compute( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ) -> Optional[Any]:
        """simple docstring"""
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , '''''' , x ) for x in predictions] )
                references = np.array([re.sub(s , '''''' , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans('''''' , '''''' , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        if ignore_numbers:
            repl_table = string.digits.maketrans('''''' , '''''' , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 100}
| 391 | 0 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None
def parse_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.' )
parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.' )
parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.' )
parser.add_argument(
'--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).' )
parser.add_argument(
'--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.' )
parser.add_argument(
'--na-prob-thresh' , '-t' , type=lowerCamelCase__ , default=1.0 , help='Predict \"\" if no-answer probability exceeds this (default = 1.0).' , )
parser.add_argument(
'--out-image-dir' , '-p' , metavar='out_images' , default=lowerCamelCase__ , help='Save precision-recall curves to directory.' )
parser.add_argument('--verbose' , '-v' , action='store_true' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def make_qid_to_has_ans(dataset ):
    '''simple docstring'''
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa['id']] = bool(qa['answers']['text'] )
    return qid_to_has_ans
def normalize_answer(s ):
    '''simple docstring'''
    def remove_articles(text ):
        return ARTICLES_REGEX.sub(' ' , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def get_tokens(s ):
    '''simple docstring'''
    if not s:
        return []
    return normalize_answer(s ).split()
def compute_exact(a_gold , a_pred ):
    '''simple docstring'''
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_fa(a_gold , a_pred ):
    '''simple docstring'''
    gold_toks = get_tokens(a_gold )
    pred_toks = get_tokens(a_pred )
    common = collections.Counter(gold_toks ) & collections.Counter(pred_toks )
    num_same = sum(common.values() )
    if len(gold_toks ) == 0 or len(pred_toks ) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks )
    recall = 1.0 * num_same / len(gold_toks )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def get_raw_scores(dataset , preds ):
    '''simple docstring'''
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t )]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"""Missing prediction for {qid}""" )
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a , a_pred ) for a in gold_answers )
                fa_scores[qid] = max(compute_fa(a , a_pred ) for a in gold_answers )
    return exact_scores, fa_scores
def apply_no_ans_threshold(scores , na_probs , qid_to_has_ans , na_prob_thresh ):
    '''simple docstring'''
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid] )
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores , fa_scores , qid_list=None ):
    '''simple docstring'''
    if not qid_list:
        total = len(exact_scores )
        return collections.OrderedDict(
            [
                ('exact', 100.0 * sum(exact_scores.values() ) / total),
                ('f1', 100.0 * sum(fa_scores.values() ) / total),
                ('total', total),
            ] )
    else:
        total = len(qid_list )
        return collections.OrderedDict(
            [
                ('exact', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
                ('f1', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
                ('total', total),
            ] )
def merge_eval(main_eval , new_eval , prefix ):
    '''simple docstring'''
    for k in new_eval:
        main_eval[f"""{prefix}_{k}"""] = new_eval[k]
def plot_pr_curve(precisions , recalls , out_image , title ):
    '''simple docstring'''
    plt.step(recalls , precisions , color='b' , alpha=0.2 , where='post' )
    plt.fill_between(recalls , precisions , step='post' , alpha=0.2 , color='b' )
    plt.xlabel('Recall' )
    plt.ylabel('Precision' )
    plt.xlim([0.0, 1.05] )
    plt.ylim([0.0, 1.05] )
    plt.title(title )
    plt.savefig(out_image )
    plt.clf()
def make_precision_recall_eval(scores , na_probs , num_true_pos , qid_to_has_ans , out_image=None , title=None ):
    '''simple docstring'''
    qid_list = sorted(na_probs , key=lambda k : na_probs[k] )
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list ):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1 )
        cur_r = true_pos / float(num_true_pos )
        if i == len(qid_list ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p )
            recalls.append(cur_r )
    if out_image:
        plot_pr_curve(precisions , recalls , out_image , title )
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval , exact_raw , fa_raw , na_probs , qid_to_has_ans , out_image_dir ):
    '''simple docstring'''
    if out_image_dir and not os.path.exists(out_image_dir ):
        os.makedirs(out_image_dir )
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v )
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , 'pr_exact.png' ) , title='Precision-Recall curve for Exact Match score' , )
    pr_fa = make_precision_recall_eval(
        fa_raw , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , 'pr_f1.png' ) , title='Precision-Recall curve for F1 score' , )
    oracle_scores = {k: float(v ) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , 'pr_oracle.png' ) , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , )
    merge_eval(main_eval , pr_exact , 'pr_exact' )
    merge_eval(main_eval , pr_fa , 'pr_f1' )
    merge_eval(main_eval , pr_oracle , 'pr_oracle' )
def histogram_na_prob(na_probs , qid_list , image_dir , name ):
    '''simple docstring'''
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x ) / float(len(x ) )
    plt.hist(x , weights=weights , bins=20 , range=(0.0, 1.0) )
    plt.xlabel('Model probability of no-answer' )
    plt.ylabel('Proportion of dataset' )
    plt.title(f"""Histogram of no-answer probability: {name}""" )
    plt.savefig(os.path.join(image_dir , f"""na_prob_hist_{name}.png""" ) )
    plt.clf()
def find_best_thresh(preds , scores , na_probs , qid_to_has_ans ):
    '''simple docstring'''
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs , key=lambda k : na_probs[k] )
    for i, qid in enumerate(qid_list ):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores ), best_thresh
def find_all_best_thresh(main_eval , preds , exact_raw , fa_raw , na_probs , qid_to_has_ans ):
    '''simple docstring'''
    best_exact , exact_thresh = find_best_thresh(preds , exact_raw , na_probs , qid_to_has_ans )
    best_fa , fa_thresh = find_best_thresh(preds , fa_raw , na_probs , qid_to_has_ans )
    main_eval['best_exact'] = best_exact
    main_eval['best_exact_thresh'] = exact_thresh
    main_eval['best_f1'] = best_fa
    main_eval['best_f1_thresh'] = fa_thresh
def main():
    '''simple docstring'''
    with open(OPTS.data_file ) as f:
        dataset_json = json.load(f )
        dataset = dataset_json["data"]
    with open(OPTS.pred_file ) as f:
        preds = json.load(f )
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file ) as f:
            na_probs = json.load(f )
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset ) # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw , fa_raw = get_raw_scores(dataset , preds )
    exact_thresh = apply_no_ans_threshold(exact_raw , na_probs , qid_to_has_ans , OPTS.na_prob_thresh )
    fa_thresh = apply_no_ans_threshold(fa_raw , na_probs , qid_to_has_ans , OPTS.na_prob_thresh )
    out_eval = make_eval_dict(exact_thresh , fa_thresh )
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh , fa_thresh , qid_list=has_ans_qids )
        merge_eval(out_eval , has_ans_eval , 'HasAns' )
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh , fa_thresh , qid_list=no_ans_qids )
        merge_eval(out_eval , no_ans_eval , 'NoAns' )
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval , preds , exact_raw , fa_raw , na_probs , qid_to_has_ans )
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval , exact_raw , fa_raw , na_probs , qid_to_has_ans , OPTS.out_image_dir )
        histogram_na_prob(na_probs , has_ans_qids , OPTS.out_image_dir , 'hasAns' )
        histogram_na_prob(na_probs , no_ans_qids , OPTS.out_image_dir , 'noAns' )
    if OPTS.out_file:
        with open(OPTS.out_file , 'w' ) as f:
            json.dump(out_eval , f )
    else:
        print(json.dumps(out_eval , indent=2 ) )
if __name__ == "__main__":
OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
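# --- Illustrative sketch (not part of the official script) ---
# Token-level F1 is the harmonic mean of precision and recall over the bags of
# normalized tokens (articles, case and punctuation are stripped first):
def _demo_f1():
    # gold -> {fuzzy, cat}, pred -> {fuzzy, dog} ("a" is an article and is removed)
    # precision = 1/2, recall = 1/2, F1 = 0.5
    assert abs(compute_fa("a fuzzy cat", "a fuzzy dog!") - 0.5) < 1e-9
    assert compute_exact("The Cat.", "the cat") == 1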
| 70 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mluke'''] = ['''MLukeTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
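# --- Illustrative note (not part of the original module) ---
# _LazyModule defers the actual import until an attribute is first accessed, so
# that `import transformers` stays cheap. A minimal sketch of the same pattern
# using a module-level __getattr__ (PEP 562), independent of transformers:
#
#     import importlib
#     _lazy_targets = {"MLukeTokenizer": ".tokenization_mluke"}
#     def __getattr__(name):
#         if name in _lazy_targets:
#             module = importlib.import_module(_lazy_targets[name], __name__)
#             return getattr(module, name)
#         raise AttributeError(name)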
| 496 | 0 |
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
__magic_name__ = get_logger(__name__)
class VerificationMode(enum.Enum):
    ALL_CHECKS = 'all_checks'
    BASIC_CHECKS = 'basic_checks'
    NO_CHECKS = 'no_checks'
class ChecksumVerificationException(Exception):
    pass
class UnexpectedDownloadedFile(ChecksumVerificationException):
    pass
class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    pass
class NonMatchingChecksumError(ChecksumVerificationException):
    pass
def verify_checksums(expected_checksums , recorded_checksums , verification_name=None ):
    if expected_checksums is None:
        logger.info("Unable to verify checksums." )
        return
    if len(set(expected_checksums ) - set(recorded_checksums ) ) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums ) - set(recorded_checksums ) ) )
    if len(set(recorded_checksums ) - set(expected_checksums ) ) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums ) - set(expected_checksums ) ) )
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls ) > 0:
        raise NonMatchingChecksumError(
            F"""Checksums didn't match{for_verification_name}:\n"""
            F"""{bad_urls}\n"""
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" )
    logger.info("All the checksums matched successfully" + for_verification_name )
class SplitsVerificationException(Exception):
    pass
class UnexpectedSplits(SplitsVerificationException):
    pass
class ExpectedMoreSplits(SplitsVerificationException):
    pass
class NonMatchingSplitsSizesError(SplitsVerificationException):
    pass
def verify_splits(expected_splits , recorded_splits ):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes." )
        return
    if len(set(expected_splits ) - set(recorded_splits ) ) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits ) - set(recorded_splits ) ) )
    if len(set(recorded_splits ) - set(expected_splits ) ) > 0:
        raise UnexpectedSplits(str(set(recorded_splits ) - set(expected_splits ) ) )
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits ) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits ) )
    logger.info("All the splits matched successfully." )
def get_size_checksum_dict(path , record_checksum = True ):
    if record_checksum:
        m = sha256()
        with open(path , "rb" ) as f:
            for chunk in iter(lambda: f.read(1 << 20 ) , B"" ):
                m.update(chunk )
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path ), "checksum": checksum}
def is_small_dataset(dataset_size ):
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
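# --- Illustrative sketch (not part of the original module) ---
# verify_checksums compares two {url: {"num_bytes": ..., "checksum": ...}} records
# and raises NonMatchingChecksumError on any disagreement:
def _demo_verify_checksums():
    expected = {"http://example/a": {"num_bytes": 1, "checksum": "aa"}}
    verify_checksums(expected, dict(expected))  # identical records: passes
    try:
        verify_checksums(expected, {"http://example/a": {"num_bytes": 1, "checksum": "bb"}})
    except NonMatchingChecksumError:
        return "mismatch detected"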
| 530 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_clip_fast'''] = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_clip'''] = ['''CLIPFeatureExtractor''']
    _import_structure['''image_processing_clip'''] = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_clip'''] = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_clip'''] = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_clip'''] = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 530 | 1 |
import math
import sys
def read_file_binary(file_path: str ) -> str:
    """simple docstring"""
    result = ''
    try:
        with open(file_path , 'rb' ) as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = F'{dat:08b}'
            result += curr_byte
        return result
    except OSError:
        print('File not accessible' )
        sys.exit()
def decompress_data(data_bits: str ) -> str:
    """simple docstring"""
    lexicon = {'0': '0', '1': '1'}
    result, curr_string = '', ''
    index = len(lexicon )
    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + '0'
        if math.log2(index ).is_integer():
            new_lex = {}
            for curr_key in list(lexicon ):
                new_lex['0' + curr_key] = lexicon.pop(curr_key )
            lexicon = new_lex
        lexicon[bin(index )[2:]] = last_match_id + '1'
        index += 1
        curr_string = ''
    return result
def write_file_binary(file_path: str , to_write: str ) -> None:
    """simple docstring"""
    byte_length = 8
    try:
        with open(file_path , 'wb' ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append('10000000' )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder='big' ) )
    except OSError:
        print('File not accessible' )
        sys.exit()
def remove_prefix(data_bits: str ) -> str:
    """simple docstring"""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str , destination_path: str ) -> None:
    """simple docstring"""
    data_bits = read_file_binary(source_path )
    data_bits = remove_prefix(data_bits )
    decompressed = decompress_data(data_bits )
    write_file_binary(destination_path , decompressed )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
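# --- Illustrative sketch (not part of the original script) ---
# decompress_data inverts the LZW-style bit compression above by growing a
# lexicon that maps code prefixes back to bit strings. Assuming the
# reconstruction above (lexicon seeded with "0"/"1" and index = len(lexicon)),
# the shortest input decodes to its own seed entry:
def _demo_decompress():
    assert decompress_data("0") == "0"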
| 242 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
class __magic_name__ ( DiffusionPipeline ):
def __init__( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Optional[int]:
'''simple docstring'''
super().__init__()
self.register_modules(
vae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , unet=__snake_case , scheduler=__snake_case , safety_checker=__snake_case , feature_extractor=__snake_case , )
def __magic_name__ ( self , __snake_case = "auto" ) -> int:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a =self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__snake_case )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
self.enable_attention_slicing(__snake_case )
@torch.no_grad()
def __call__( self , __snake_case , __snake_case = 512 , __snake_case = 512 , __snake_case = 50 , __snake_case = 7.5 , __snake_case = None , __snake_case = 1 , __snake_case = 0.0 , __snake_case = None , __snake_case = None , __snake_case = "pil" , __snake_case = True , __snake_case = None , __snake_case = 1 , __snake_case = None , **__snake_case , ) -> Tuple:
'''simple docstring'''
if isinstance(__snake_case , __snake_case ):
__a =1
elif isinstance(__snake_case , __snake_case ):
__a =len(__snake_case )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(__snake_case )}' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__snake_case , __snake_case ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(__snake_case )}.' )
# get prompt text embeddings
__a =self.tokenizer(
__snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
__a =text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__a =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
__a =text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__a =self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__a , __a , __a =text_embeddings.shape
__a =text_embeddings.repeat(1 , __snake_case , 1 )
__a =text_embeddings.view(bs_embed * num_images_per_prompt , __snake_case , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__a =guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__a =42
if negative_prompt is None:
__a =['']
elif type(__snake_case ) is not type(__snake_case ):
raise TypeError(
f'`negative_prompt` should be the same type to `prompt`, but got {type(__snake_case )} !='
f' {type(__snake_case )}.' )
elif isinstance(__snake_case , __snake_case ):
__a =[negative_prompt]
elif batch_size != len(__snake_case ):
raise ValueError(
f'`negative_prompt`: {negative_prompt} has batch size {len(__snake_case )}, but `prompt`:'
f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
' the batch size of `prompt`.' )
else:
__a =negative_prompt
__a =text_input_ids.shape[-1]
__a =self.tokenizer(
__snake_case , padding='max_length' , max_length=__snake_case , truncation=__snake_case , return_tensors='pt' , )
__a =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__a =uncond_embeddings.shape[1]
__a =uncond_embeddings.repeat(__snake_case , __snake_case , 1 )
__a =uncond_embeddings.view(batch_size * num_images_per_prompt , __snake_case , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__a =torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__a =(batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__a =(batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__a =text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__a =torch.randn(
__snake_case , generator=__snake_case , device='cpu' , dtype=__snake_case ).to(self.device )
__a =torch.randn(__snake_case , generator=__snake_case , device='cpu' , dtype=__snake_case ).to(
self.device )
else:
__a =torch.randn(
__snake_case , generator=__snake_case , device=self.device , dtype=__snake_case )
__a =torch.randn(__snake_case , generator=__snake_case , device=self.device , dtype=__snake_case )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
__a =latents_reference.to(self.device )
__a =latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__a =(latents_shape[3] - latents_shape_reference[3]) // 2
__a =(latents_shape[2] - latents_shape_reference[2]) // 2
__a =latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__a =latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__a =0 if dx < 0 else dx
__a =0 if dy < 0 else dy
__a =max(-dx , 0 )
__a =max(-dy , 0 )
# import pdb
# pdb.set_trace()
__a =latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(__snake_case )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__a =self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__a =latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a ='eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a ={}
if accepts_eta:
__a =eta
for i, t in enumerate(self.progress_bar(__snake_case ) ):
# expand the latents if we are doing classifier free guidance
__a =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__a =self.scheduler.scale_model_input(__snake_case , __snake_case )
# predict the noise residual
__a =self.unet(__snake_case , __snake_case , encoder_hidden_states=__snake_case ).sample
# perform guidance
if do_classifier_free_guidance:
__a , __a =noise_pred.chunk(2 )
__a =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__a =self.scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__snake_case , __snake_case , __snake_case )
__a =1 / 0.1_8215 * latents
__a =self.vae.decode(__snake_case ).sample
__a =(image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__a =image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__a =self.feature_extractor(self.numpy_to_pil(__snake_case ) , return_tensors='pt' ).to(
self.device )
__a , __a =self.safety_checker(
images=__snake_case , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__a =None
if output_type == "pil":
__a =self.numpy_to_pil(__snake_case )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=__snake_case , nsfw_content_detected=__snake_case )
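# --- Illustrative sketch (not part of the original pipeline) ---
# The guidance step in __call__ above is plain classifier-free guidance: one
# U-Net pass over the concatenated [unconditional, text-conditioned] batch, then
# an extrapolation from the unconditional prediction toward the conditioned one:
def _demo_classifier_free_guidance(noise_pred_uncond, noise_pred_text, guidance_scale=7.5):
    # guidance_scale == 1.0 reproduces the purely conditional prediction
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)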
| 242 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None
    @classmethod
    def create( cls , common : CommonSchedulerState , init_noise_sigma : jnp.ndarray , timesteps : jnp.ndarray ):
        """simple docstring"""
        return cls(common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps )
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState
class FlaxDDPMScheduler(FlaxSchedulerMixin , ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
    dtype: jnp.dtype
    @property
    def has_state( self ):
        """simple docstring"""
        return True
    @register_to_config
    def __init__( self , num_train_timesteps : int = 1_0_0_0 , beta_start : float = 0.0_001 , beta_end : float = 0.02 , beta_schedule : str = "linear" , trained_betas : Optional[jnp.ndarray] = None , variance_type : str = "fixed_small" , clip_sample : bool = True , prediction_type : str = "epsilon" , dtype : jnp.dtype = jnp.float32 , ):
        """simple docstring"""
        self.dtype = dtype
    def create_state( self , common : Optional[CommonSchedulerState] = None ):
        """simple docstring"""
        if common is None:
            common = CommonSchedulerState.create(self )
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0 , dtype=self.dtype )
        timesteps = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
            common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps , )
    def scale_model_input( self , state : DDPMSchedulerState , sample : jnp.ndarray , timestep : Optional[int] = None ):
        """simple docstring"""
        return sample
    def set_timesteps( self , state : DDPMSchedulerState , num_inference_steps : int , shape : Tuple = () ):
        """simple docstring"""
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0 , num_inference_steps ) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps , timesteps=timesteps , )
    def _get_variance( self , state : DDPMSchedulerState , t , predicted_variance=None , variance_type=None ):
        """simple docstring"""
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance , a_min=1E-20 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance , a_min=1E-20 ) )
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step( self , state : DDPMSchedulerState , model_output : jnp.ndarray , timestep : int , sample : jnp.ndarray , key : Optional[jax.random.KeyArray] = None , return_dict : bool = True , ):
        """simple docstring"""
        t = timestep
        if key is None:
            key = jax.random.PRNGKey(0 )
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output , predicted_variance = jnp.split(model_output , sample.shape[1] , axis=1 )
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or"""
                ' `v_prediction` for the FlaxDDPMScheduler.' )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample , -1 , 1 )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key , num=1 )
            noise = jax.random.normal(split_key , shape=model_output.shape , dtype=self.dtype )
            return (self._get_variance(state , t , predicted_variance=predicted_variance ) ** 0.5) * noise
        variance = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample , state=state )
    def add_noise( self , state : DDPMSchedulerState , original_samples : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray , ):
        """simple docstring"""
        return add_noise_common(state.common , original_samples , noise , timesteps )
    def get_velocity( self , state : DDPMSchedulerState , sample : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray , ):
        """simple docstring"""
        return get_velocity_common(state.common , sample , noise , timesteps )
def __len__( self : Union[str, Any] ):
"""simple docstring"""
return self.config.num_train_timesteps
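# --- Illustrative sketch (not part of the original scheduler) ---
# The coefficients in step() implement the DDPM posterior mean, eq. (7) of
# Ho et al. 2020 (https://arxiv.org/pdf/2006.11239.pdf):
#   mu_t = sqrt(alpha_bar_{t-1}) * beta_t / (1 - alpha_bar_t) * x_0
#        + sqrt(alpha_t) * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * x_t
# A scalar sanity check with hand-picked (hypothetical) values:
def _demo_posterior_mean():
    alpha_t = 0.99
    alpha_bar_t, alpha_bar_prev = 0.5, 0.5 / 0.99
    beta_t = 1 - alpha_t
    x0, xt = jnp.array(1.0), jnp.array(2.0)
    coeff_x0 = (alpha_bar_prev ** 0.5) * beta_t / (1 - alpha_bar_t)
    coeff_xt = (alpha_t ** 0.5) * (1 - alpha_bar_prev) / (1 - alpha_bar_t)
    return coeff_x0 * x0 + coeff_xt * xt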
| 673 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a_ : Any = logging.get_logger(__name__)
a_ : Dict = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class VanConfig(PretrainedConfig):
    model_type = "van"
    def __init__( self , image_size=2_2_4 , num_channels=3 , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , depths=[3, 3, 1_2, 3] , mlp_ratios=[8, 8, 4, 4] , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1E-6 , layer_scale_init_value=1E-2 , drop_path_rate=0.0 , dropout_rate=0.0 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
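# --- Illustrative sketch (not part of the original module) ---
# The config only records architecture hyper-parameters; e.g. a van-base-like
# configuration (these values mirror the constructor defaults above, not a
# specific released checkpoint):
def _demo_van_config():
    config = VanConfig(hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3])
    return config.model_type, config.mlp_ratios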
| 673 | 1 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def a__(main_process_only=True, *args, **kwargs):
    """Wrap `tqdm.auto.tqdm`, disabling the bar on non-main processes when asked."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
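
# --- Editor's hedged usage sketch (not part of the original file) ---
# On the main process this renders a normal progress bar; on every other
# process the bar is disabled.
if __name__ == "__main__":
    for _ in a__(True, range(10), desc="steps"):
        pass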
| 529 |
'''simple docstring'''
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Get the ranges of shard indices per job, distributing the shards as evenly as possible."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split the gen_kwargs into at most `max_num_jobs` gen_kwargs, sharding the data source lists."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Merge a list of sharded gen_kwargs back into a single gen_kwargs dict."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Return a copy of gen_kwargs in which each data source list is shuffled consistently by length."""
    # Use the same shuffled indices for all lists of the same length so paired sources stay aligned
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
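
# --- Editor's hedged usage sketch (not part of the original file) ---
# Splitting a gen_kwargs dict with a 5-element file list across 2 jobs and
# merging the shards back; the file names are illustrative.
if __name__ == "__main__":
    gen_kwargs = {"files": [f"data_{i}.txt" for i in range(5)], "split": "train"}
    shards = _split_gen_kwargs(gen_kwargs, max_num_jobs=2)
    print(shards)  # job 0 gets 3 files, job 1 gets 2
    assert _merge_gen_kwargs(shards) == gen_kwargs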
| 245 | 0 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(self, parent, batch_size=3, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32,
                 num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
                 type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
                 num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range, pad_token_id=1,
            new_decoder_architecture=True,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels,
                               token_labels, choice_labels):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels,
                                          token_labels, choice_labels, encoder_hidden_states,
                                          encoder_attention_mask):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels,
                                       token_labels, choice_labels, encoder_hidden_states,
                                       encoder_attention_mask):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask,
                                                         sequence_labels, token_labels, choice_labels,
                                                         encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"""feature-extraction""": FalconModel,
"""text-classification""": FalconForSequenceClassification,
"""text-generation""": FalconForCausalLM,
"""question-answering""": FalconForQuestionAnswering,
"""token-classification""": FalconForTokenClassification,
"""zero-shot""": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_alibi(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)
    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_falcon_cache_conversion(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(rw_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])
                )
    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_past_key_values_format(self):
        # Falcon can use a different number of KV heads than query heads, so this override
        # computes the expected cache shapes from the Falcon-specific config fields.
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return

            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)

            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return

            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads

            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)

            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
@require_torch
class _a ( unittest.TestCase ):
@slow
    def test_lm_generation(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )

        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]

        self.assertEqual(output_str, EXPECTED_OUTPUT)
@slow
    def test_lm_generation_big_models(self):
        # The big models are too large for CI, so we use tiny random models with the same architectures
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)
@slow
    def test_lm_generation_use_cache(self):
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
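
# --- Editor's hedged usage sketch (not part of the original file) ---
# A standalone version of what the slow tests above exercise, using the same
# tiny random checkpoint; the generated text is meaningless by design.
if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/tiny-random-falcon-7b")
    model = FalconForCausalLM.from_pretrained("Rocketknight1/tiny-random-falcon-7b")
    model.eval()
    inputs = tokenizer("My favorite food is", return_tensors="pt")
    outputs = model.generate(**inputs, do_sample=False, max_new_tokens=4)
    print(tokenizer.batch_decode(outputs)[0])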
| 358 |
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """Visual Question Answering pipeline, answering open-ended questions about images."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Also supports dicts, lists of dicts, generators and datasets
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
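
# --- Editor's hedged usage sketch (not part of the original file) ---
# Using the pipeline through the high-level `pipeline()` factory; the image URL
# and question are illustrative, and the default checkpoint is downloaded.
if __name__ == "__main__":
    from transformers import pipeline

    vqa = pipeline("visual-question-answering")
    print(vqa(
        image="http://images.cocodataset.org/val2017/000000039769.jpg",
        question="How many cats are there?",
    ))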
| 358 | 1 |
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Greedily select a maximum set of non-overlapping activities.

    Assumes the activities are sorted by finish time.
    """
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
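    # --- Editor's note (not part of the original file) ---
    # With the (finish-time-sorted) sample input above, the greedy pass prints:
    # 0,1,3,4,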
| 40 |
def nor_gate(input_1: int, input_2: int) -> int:
    """Return 1 only when both inputs are 0 (logical NOR)."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|       0 |       0 |      {nor_gate(0, 0)} |")
    print(f"|       0 |       1 |      {nor_gate(0, 1)} |")
    print(f"|       1 |       0 |      {nor_gate(1, 0)} |")
    print(f"|       1 |       1 |      {nor_gate(1, 1)} |")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
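    # --- Editor's note (not part of the original file) ---
    # NOR is functionally complete; e.g. NOT(a) == nor_gate(a, a):
    assert nor_gate(0, 0) == 1 and nor_gate(1, 1) == 0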
| 697 | 0 |
"""simple docstring"""
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
UpperCamelCase = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
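

# --- Editor's hedged usage sketch (not part of the original file) ---
# `get_results` expects the example scripts to have written an
# `all_results.json` into the output directory; the values here are made up.
def _demo_get_results():
    with tempfile.TemporaryDirectory() as d:
        with open(os.path.join(d, "all_results.json"), "w") as f:
            json.dump({"eval_accuracy": 0.75}, f)
        assert get_results(d)["eval_accuracy"] == 0.75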
def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
UpperCamelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write an accelerate config, picked up on CPU, GPU, and multi-GPU runs
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --seed=42
            --checkpointing_steps epoch
            --with_tracking
        '''.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --checkpointing_steps epoch
            --with_tracking
        '''.split()

        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --num_train_epochs=1
            --checkpointing_steps epoch
            --with_tracking
        '''.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowerCamelCase__ ( self ) ->int:
'''simple docstring'''
_a = 7 if get_gpu_count() > 1 else 2
_a = self.get_auto_remove_tmp_dir()
_a = f'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
_a = get_results(a_ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertLess(result["train_loss"] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(a_ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(a_ , "ner_no_trainer" ) ) )
@unittest.skip(reason="Fix me @muellerzr" )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowerCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
_a = self.get_auto_remove_tmp_dir()
_a = f'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
_a = get_results(a_ )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["eval_f1"] , 2_8 )
self.assertGreaterEqual(result["eval_exact"] , 2_8 )
self.assertTrue(os.path.exists(os.path.join(a_ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(a_ , "qa_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowerCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
_a = self.get_auto_remove_tmp_dir()
_a = f'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
_a = get_results(a_ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(a_ , "swag_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowerCamelCase__ ( self ) ->Union[str, Any]:
'''simple docstring'''
_a = self.get_auto_remove_tmp_dir()
_a = f'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
_a = get_results(a_ )
self.assertGreaterEqual(result["eval_rouge1"] , 1_0 )
self.assertGreaterEqual(result["eval_rouge2"] , 2 )
self.assertGreaterEqual(result["eval_rougeL"] , 7 )
self.assertGreaterEqual(result["eval_rougeLsum"] , 7 )
self.assertTrue(os.path.exists(os.path.join(a_ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(a_ , "summarization_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowerCamelCase__ ( self ) ->int:
'''simple docstring'''
_a = self.get_auto_remove_tmp_dir()
_a = f'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
_a = get_results(a_ )
self.assertGreaterEqual(result["eval_bleu"] , 3_0 )
self.assertTrue(os.path.exists(os.path.join(a_ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(a_ , "translation_no_trainer" ) ) )
@slow
def lowerCamelCase__ ( self ) ->Dict:
'''simple docstring'''
_a = logging.StreamHandler(sys.stdout )
logger.addHandler(a_ )
_a = self.get_auto_remove_tmp_dir()
_a = f'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
_a = get_results(a_ )
self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def lowerCamelCase__ ( self ) ->str:
'''simple docstring'''
_a = self.get_auto_remove_tmp_dir()
_a = f'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
_a = get_results(a_ )
# The base model scores a 25%
self.assertGreaterEqual(result["eval_accuracy"] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(a_ , "step_1" ) ) )
self.assertTrue(os.path.exists(os.path.join(a_ , "image_classification_no_trainer" ) ) )
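
# --- Editor's hedged note (not part of the original file) ---
# Each test above expands to an `accelerate launch` command line along these
# lines (paths illustrative), executed via `run_command`:
#
#   accelerate launch --config_file <tmpdir>/default_config.yml \
#       <examples_dir>/pytorch/text-classification/run_glue_no_trainer.py \
#       --model_name_or_path distilbert-base-uncased --output_dir <tmpdir> ...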
| 612 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("""align""", """EfficientNetImageProcessor"""),
("""beit""", """BeitImageProcessor"""),
("""bit""", """BitImageProcessor"""),
("""blip""", """BlipImageProcessor"""),
("""blip-2""", """BlipImageProcessor"""),
("""bridgetower""", """BridgeTowerImageProcessor"""),
("""chinese_clip""", """ChineseCLIPImageProcessor"""),
("""clip""", """CLIPImageProcessor"""),
("""clipseg""", """ViTImageProcessor"""),
("""conditional_detr""", """ConditionalDetrImageProcessor"""),
("""convnext""", """ConvNextImageProcessor"""),
("""convnextv2""", """ConvNextImageProcessor"""),
("""cvt""", """ConvNextImageProcessor"""),
("""data2vec-vision""", """BeitImageProcessor"""),
("""deformable_detr""", """DeformableDetrImageProcessor"""),
("""deit""", """DeiTImageProcessor"""),
("""deta""", """DetaImageProcessor"""),
("""detr""", """DetrImageProcessor"""),
("""dinat""", """ViTImageProcessor"""),
("""donut-swin""", """DonutImageProcessor"""),
("""dpt""", """DPTImageProcessor"""),
("""efficientformer""", """EfficientFormerImageProcessor"""),
("""efficientnet""", """EfficientNetImageProcessor"""),
("""flava""", """FlavaImageProcessor"""),
("""focalnet""", """BitImageProcessor"""),
("""git""", """CLIPImageProcessor"""),
("""glpn""", """GLPNImageProcessor"""),
("""groupvit""", """CLIPImageProcessor"""),
("""imagegpt""", """ImageGPTImageProcessor"""),
("""instructblip""", """BlipImageProcessor"""),
("""layoutlmv2""", """LayoutLMv2ImageProcessor"""),
("""layoutlmv3""", """LayoutLMv3ImageProcessor"""),
("""levit""", """LevitImageProcessor"""),
("""mask2former""", """Mask2FormerImageProcessor"""),
("""maskformer""", """MaskFormerImageProcessor"""),
("""mgp-str""", """ViTImageProcessor"""),
("""mobilenet_v1""", """MobileNetV1ImageProcessor"""),
("""mobilenet_v2""", """MobileNetV2ImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevitv2""", """MobileViTImageProcessor"""),
("""nat""", """ViTImageProcessor"""),
("""oneformer""", """OneFormerImageProcessor"""),
("""owlvit""", """OwlViTImageProcessor"""),
("""perceiver""", """PerceiverImageProcessor"""),
("""pix2struct""", """Pix2StructImageProcessor"""),
("""poolformer""", """PoolFormerImageProcessor"""),
("""regnet""", """ConvNextImageProcessor"""),
("""resnet""", """ConvNextImageProcessor"""),
("""sam""", """SamImageProcessor"""),
("""segformer""", """SegformerImageProcessor"""),
("""swiftformer""", """ViTImageProcessor"""),
("""swin""", """ViTImageProcessor"""),
("""swin2sr""", """Swin2SRImageProcessor"""),
("""swinv2""", """ViTImageProcessor"""),
("""table-transformer""", """DetrImageProcessor"""),
("""timesformer""", """VideoMAEImageProcessor"""),
("""tvlt""", """TvltImageProcessor"""),
("""upernet""", """SegformerImageProcessor"""),
("""van""", """ConvNextImageProcessor"""),
("""videomae""", """VideoMAEImageProcessor"""),
("""vilt""", """ViltImageProcessor"""),
("""vit""", """ViTImageProcessor"""),
("""vit_hybrid""", """ViTHybridImageProcessor"""),
("""vit_mae""", """ViTImageProcessor"""),
("""vit_msn""", """ViTImageProcessor"""),
("""xclip""", """CLIPImageProcessor"""),
("""yolos""", """YolosImageProcessor"""),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(pretrained_model_name_or_path, cache_dir=None, force_download=False,
                               resume_download=False, proxies=None, use_auth_token=None, revision=None,
                               local_files_only=False, **kwargs):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, IMAGE_PROCESSOR_NAME, cache_dir=cache_dir, force_download=force_download,
        resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info("Could not locate the image processor configuration file, will try to use the model config instead.")
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )
    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )
    @staticmethod
    def register(config_class, image_processor_class):
        """Register a new image processor class for a given config class."""
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
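

# --- Editor's hedged usage sketch (not part of the original file) ---
# Resolving a concrete image processor through the auto class; the checkpoint
# name is illustrative, and `vit` maps to `ViTImageProcessor` in the table above.
if __name__ == "__main__":
    processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
    print(type(processor).__name__)  # e.g. ViTImageProcessor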
| 612 | 1 |