# Dataset viewer schema: code (string), code_codestyle (int64), style_context (string),
# style_context_codestyle (int64), label (int64)
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
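# A minimal sketch (an assumption for illustration, not necessarily the actual
# transformers implementation) of the dispatch idea the helpers under test
# implement: inspect the input's type and route to the matching backend, so a
# single `transpose` call accepts NumPy arrays, torch/tf tensors, and jax arrays.
def transpose_sketch(array, axes=None):
    if isinstance(array, np.ndarray):
        return np.transpose(array, axes=axes)
    if is_torch_available() and isinstance(array, torch.Tensor):
        # permute needs explicit dims; default to reversing them like NumPy does
        dims = axes if axes is not None else tuple(reversed(range(array.dim())))
        return array.permute(*dims)
    if is_tf_available() and isinstance(array, tf.Tensor):
        return tf.transpose(array, perm=axes)
    if is_flax_available() and isinstance(array, jnp.ndarray):
        return jnp.transpose(array, axes=axes)
    raise ValueError(f"Type not supported for transpose: {type(array)}.")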
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}


class ReformerTokenizer(PreTrainedTokenizer):
    """
    Construct a Reformer tokenizer, backed by SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        # SentencePieceProcessor objects cannot be pickled; they are reloaded in __setstate__
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into SentencePiece pieces."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Convert an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens (strings) back into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
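# Usage sketch (checkpoint name comes from the map above; the sample text is
# illustrative). Encoding and decoding should round-trip up to SentencePiece's
# whitespace normalization.
if __name__ == "__main__":
    tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
    ids = tokenizer.encode("Crime and Punishment")
    print(tokenizer.convert_ids_to_tokens(ids))
    print(tokenizer.decode(ids))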
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Format a user-agent string with basic info about the current environment."""
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None) -> str:
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH, model_name=model_name, repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None) -> Optional[str]:
    """Extract the commit hash from a resolved filename toward a cache file."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
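# Worked example (the path and hash are illustrative): files resolved from the
# Hub cache live under "snapshots/<commit>/", so the commit id can be read back
# from the resolved path alone:
#
#     extract_commit_hash(
#         "~/.cache/huggingface/hub/models--gpt2/snapshots/"
#         "e7da7f221d5bf496a48136c0cd264e630fe9fcc8/config.json"
#     )
#     # -> "e7da7f221d5bf496a48136c0cd264e630fe9fcc8"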
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of"
                    " diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
                "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
                "message and we will do our best to help."
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, "w") as f:
            f.write("1")
    except Exception:
        logger.warning(
            f"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
            "the directory exists and can be written to."
        )
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
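# Worked example: the variant tag is spliced in before the extension, so the
# fp16 variant of the default PyTorch weights file becomes
#
#     _add_variant("diffusion_pytorch_model.bin", "fp16")
#     # -> "diffusion_pytorch_model.fp16.bin"
#
# and a name passes through unchanged when `variant` is None.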
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`"
                    f" is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model"
                    f" variants via `revision='{revision}'` will be removed in diffusers v1. Please use"
                    f" `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via"
                    f" `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One"
                    f" should use `variant='{revision}'` instead. However, it appears that"
                    f" {pretrained_model_name_or_path} currently does not have a"
                    f" {_add_variant(weights_name, revision)} file in the 'main' branch of"
                    f" {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if"
                    f" you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title"
                    f" '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the"
                    f" correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier"
                " listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a"
                " token having permission to this repo with `use_auth_token` or log in with `huggingface-cli"
                " login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for"
                " this model name. Check the model page at"
                f" 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
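# Usage sketch (repo id is illustrative; this mirrors how the library resolves
# weights internally - local files and directories short-circuit the download):
#
#     model_file = _get_model_file(
#         "runwayml/stable-diffusion-v1-5",
#         weights_name=WEIGHTS_NAME,
#         subfolder="unet",
#         cache_dir=None, force_download=False, proxies=None,
#         resume_download=False, local_files_only=False,
#         use_auth_token=None, user_agent=None, revision=None,
#     )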
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for the ForPreTraining model, which needs two sets of labels
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"


class AlbertTokenizer(PreTrainedTokenizer):
    """
    Construct an ALBERT tokenizer, based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        # SentencePieceProcessor objects cannot be pickled; they are reloaded in __setstate__
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string, splitting trailing digit-plus-comma pieces as the original ALBERT code does."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
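# Worked example (token ids are illustrative): a pair of sequences is laid out
# as `[CLS] A [SEP] B [SEP]`, with token type ids 0 over the first segment and
# 1 over the second:
#
#     ids_a, ids_b = [10, 11], [20, 21, 22]
#     tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
#     # -> [cls_id, 10, 11, sep_id, 20, 21, 22, sep_id]
#     tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)
#     # -> [0, 0, 0, 0, 1, 1, 1, 1]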
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}


class EfficientFormerConfig(PretrainedConfig):
    """Configuration class to store the configuration of an EfficientFormer model."""

    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
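# Usage sketch: the config is a plain container of the defaults above, so a
# variant can be built by overriding individual fields:
#
#     config = EfficientFormerConfig(drop_path_rate=0.1)
#     config.hidden_sizes  # -> [48, 96, 224, 448]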
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}


def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
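# Worked example: for the word tuple ("h", "e", "l", "l", "o"), get_pairs
# returns {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")} - the candidate
# adjacent-symbol merges that `bpe` below ranks against the learned merge table.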
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """
    Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair-Encoding).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ) -> None:
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1

                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        """Split a string into its BPE tokens using the merge table."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token to an id using the vocab."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Convert a sequence of tokens into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
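# A runnable toy version of the merge loop inside `bpe` above (the merge table
# here is made up): repeatedly fuse the best-ranked adjacent pair until no
# remaining pair appears in the table.
def _toy_bpe(word: str, bpe_ranks: Dict[Tuple[str, str], int]) -> Tuple[str, ...]:
    symbols = tuple(word[:-1]) + (word[-1] + "</w>",)
    while len(symbols) > 1:
        pairs = get_pairs(symbols)
        bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = tuple(merged)
    return symbols


# _toy_bpe("hello", {("l", "l"): 0, ("ll", "o</w>"): 1})
# -> ("h", "e", "llo</w>")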
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet (given as a list of tokens)."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
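# Sketch of what the signatures buy us: datasketch's MinHash can estimate the
# Jaccard similarity of two token sets from the signatures alone (snippets are
# illustrative):
#
#     m1 = get_min_hash("def add ( a , b ) : return a + b".split())
#     m2 = get_min_hash("def add ( x , y ) : return x + y".split())
#     if m1 is not None and m2 is not None:
#         print(m1.jaccard(m2))  # estimate in [0, 1]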
def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric boundaries."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add a key to the index and cluster it with any near-duplicates already present."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters in two steps: MinHash each file, then index and cluster with LSH."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    """Compute the exact Jaccard similarity of two code snippets' token sets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
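# Worked example: "a = 1" tokenizes to {"a", "1"} and "a = 2" to {"a", "2"},
# so jaccard_similarity("a = 1", "a = 2") == len({"a"}) / len({"a", "1", "2"})
# == 1/3.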
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce a cluster to its "extremes": representatives that cover every near-duplicate in it."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Run _find_cluster_extremes_shared over all clusters in parallel, sharing the dataset as a global."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    """Deduplicate the dataset: cluster near-duplicates, keep one extreme per group, filter out the rest."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
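# Usage sketch (dataset name is illustrative; any `datasets.Dataset` with
# "content", "repo_name" and "path" columns works):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train")
#     ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)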
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 173 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
_CITATION = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
_DESCRIPTION = """
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
_KWARGS_DESCRIPTION = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Squad(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
            reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
        )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 173 | 1 |
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """
    Return the peak value of `lst` by divide and conquer. The list is assumed
    to increase strictly and then decrease strictly (a bitonic list).

    >>> peak([1, 2, 3, 4, 5, 4, 3, 2, 1])
    5
    """
    m = len(lst) // 2

    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]

    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
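# Note on cost: each recursive call roughly halves the search range, so the
# recursion depth is O(log n); the list slicing performed at every level makes
# the total work linear in the worst case.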
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 223 |
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
    import textwrap

    from PIL import Image, ImageDraw, ImageFont

if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False


logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )


def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
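# Shape sketch for `torch_extract_patches` (a hypothetical 3x32x32 input with
# 16x16 patches): `unfold` yields (1, 3 * 16 * 16, 4) columns, which are
# reshaped and permuted into a (2, 2, 768) grid of flattened patches, i.e.
# rows x columns x (channels * patch_height * patch_width), then unsqueezed to
# (1, 2, 2, 768).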
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
) -> Image.Image:
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image


def render_header(image: np.ndarray, header: str, **kwargs):
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    r"""
    Constructs a Pix2Struct image processor: images are rescaled to fit a patch grid,
    split into fixed-size patches, and flattened together with their row/column ids.
    """

    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result

    def normalize(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")

            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
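# A minimal usage sketch (values are illustrative; the shapes follow from the
# default 16x16 patches and 3 input channels):
#
#   from PIL import Image
#   processor = Pix2StructImageProcessor(max_patches=1024)
#   image = Image.new("RGB", (512, 512), "white")
#   features = processor.preprocess(images=image, return_tensors="np")
#   print(features["flattened_patches"].shape)  # (1, 1024, 2 + 16 * 16 * 3)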
| 223 | 1 |
"""simple docstring"""
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))

                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias

        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out
    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
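# How the adaptive softmax above factorizes the vocabulary: frequent
# "shortlist" tokens are scored directly by the head softmax, while a rare
# token w falling in tail cluster c is scored as
#     log p(w | h) = log p(c | h) + log p(w | c, h),
# which is the `head_logprob[..., cluster_prob_idx] + tail_logprob_i` sum in
# both `forward` and `log_prob`.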
| 294 |
"""simple docstring"""
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv=None,
) -> np.ndarray:
    """
    Schur complement of a symmetric matrix X given as a 2x2 block matrix
    consisting of matrices A, B and C. Matrix A must be quadratic and
    non-singular. In case A is singular, a pseudo-inverse may be provided
    using the pseudo_inv argument.

    Link to Wiki: https://en.wikipedia.org/wiki/Schur_complement

    >>> import numpy as np
    >>> a = np.array([[1, 2], [2, 1]])
    >>> b = np.array([[0, 3], [3, 0]])
    >>> c = np.array([[2, 1], [6, 3]])
    >>> schur_complement(a, b, c)
    array([[ 5., -5.],
           [ 0.,  6.]])
    """
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b
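# Identity exercised by the tests below: for the block matrix
#     X = [[A, B], [B.T, C]]
# with invertible A, det(X) = det(A) * det(S), where
#     S = C - B.T @ inv(A) @ B
# is the Schur complement computed above.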
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
| 294 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 292 |
def sylvester(number: int) -> int:
    """Calculate the number at position `number` in Sylvester's sequence."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
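# Sylvester's sequence satisfies s(1) = 2 and s(n) = s(n - 1)**2 - s(n - 1) + 1,
# which is exactly what `lower * upper + 1` computes above; the first terms are
# 2, 3, 7, 43, 1807, ...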
if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
| 292 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 152 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
    import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 332 | 0 |
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")

    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")

    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
| 199 |
'''simple docstring'''
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort `sequence` in place between indices `start` and `end` (inclusive)."""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
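# Slowsort is a deliberately pessimal "multiply and surrender" algorithm: it
# sorts both halves, then bubbles the maximum to `end` and recurses on the
# rest, so the running time follows T(n) = 2 * T(n / 2) + T(n - 1) + 1, which
# grows faster than any polynomial. It is useful only for demonstration.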
if __name__ == "__main__":
from doctest import testmod
testmod()
| 199 | 1 |
from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BackboneOutput,
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetEmbeddings(nn.Module):
    """
    ResNet Embeddings (stem) composed of a single aggressive convolution.
    """

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding


class ResNetShortCut(nn.Module):
    """
    ResNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class ResNetBasicLayer(nn.Module):
    """
    A classic ResNet residual layer composed of two `3x3` convolutions.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetBottleNeckLayer(nn.Module):
    """
    A classic ResNet bottleneck layer. The first `1x1` convolution reduces the input by a factor of `reduction` in
    order to make the `3x3` convolution faster; the last `1x1` convolution remaps the features to `out_channels`.
    """

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
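# Design note: the bottleneck variant above replaces a pair of wide 3x3
# convolutions with a 1x1 reduce (by `reduction`, default 4x), a cheaper 3x3
# in the narrowed space, and a 1x1 expand back to `out_channels`, which is
# what keeps the 50+ layer ResNets computationally tractable.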
class ResNetStage(nn.Module):
    """
    A ResNet stage composed of stacked layers.
    """

    def __init__(
        self,
        config: ResNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()

        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer

        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )


class ResNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
__lowerCAmelCase : Any = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
__lowerCAmelCase : Union[str, Any] = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
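# Usage sketch (illustrative only, not part of the original modeling file): a
# minimal forward pass through the bare model, assuming the public checkpoint
# "microsoft/resnet-50" and a PIL image `image` are available.
#
#   from transformers import AutoImageProcessor, ResNetModel
#
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = ResNetModel.from_pretrained("microsoft/resnet-50")
#   inputs = processor(image, return_tensors="pt")
#   outputs = model(**inputs)
#   print(outputs.last_hidden_state.shape)  # torch.Size([1, 2048, 7, 7]) for resnet-50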
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
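# Usage sketch (illustrative only): extracting multi-scale feature maps with the
# backbone, e.g. to feed a detection head. `out_features` selects which stages
# are returned; `pixel_values` is assumed to be a (batch, 3, H, W) float tensor.
#
#   config = ResNetConfig(out_features=["stage2", "stage4"])
#   backbone = ResNetBackbone(config)
#   outputs = backbone(pixel_values)
#   for feature_map in outputs.feature_maps:
#       print(feature_map.shape)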
| 156 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)


class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
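# Usage sketch (illustrative only): building a config around a custom Swin
# backbone. `SwinConfig` is assumed to be importable from `transformers`.
#
#   from transformers import SwinConfig
#
#   backbone_config = SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
#   config = Mask2FormerConfig.from_backbone_config(backbone_config, num_queries=100)
#   assert config.to_dict()["backbone_config"]["model_type"] == "swin"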
| 103 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
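# How the lazy module behaves (illustrative note, not part of the original file):
# at runtime the module object is replaced by a `_LazyModule`, so the heavyweight
# submodules listed above are only imported on first attribute access, e.g.
#
#   from transformers.models.perceiver import PerceiverConfig  # triggers the real import lazily
#
# while static type checkers still see the eager imports in the TYPE_CHECKING branch.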
| 359 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
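# Quick manual check (illustrative only), mirroring what the tests above assert
# for a single image:
#
#   from PIL import Image
#
#   processor = MobileViTImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
#   image = Image.fromarray(np.uint8(np.random.rand(30, 30, 3) * 255))
#   print(processor(image, return_tensors="pt").pixel_values.shape)  # torch.Size([1, 3, 18, 18])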
| 43 | 0 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples
    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1
    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
| 326 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
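# Usage sketch (illustrative only): this builder is what backs
# `load_dataset("csv", ...)`. Extra keyword arguments become `CsvConfig` fields
# and are forwarded to `pandas.read_csv` through `pd_read_csv_kwargs`; the file
# path below is an assumption for the example.
#
#   from datasets import load_dataset
#
#   ds = load_dataset("csv", data_files={"train": "train.csv"}, sep=";", skiprows=1)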
| 17 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 357 |
def solution(n: int = 1000) -> int:
    """Return the sum of all the multiples of 3 or 5 below n."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
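# Sanity check (illustrative): below 10 the multiples of 3 or 5 are 3, 5, 6 and 9,
# so solution(10) == 3 + 5 + 6 + 9 == 23.
#
#   assert solution(10) == 23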
| 62 | 0 |
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)
    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "[UNK]",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "[UNK]",
                ".",
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowerCamelCase = {'''input_ids''': [[1_10_73, 8_27_83, 18, 26, 8_27_83, 5_49, 5_15_40, 2_48, 1_72_09, 13_01, 2_17, 20, 21_51_86, 13_25, 1_47, 1_72_09, 13_01, 2_17, 20, 5_63_70, 53, 12_20_20, 20, 1_64_77, 27, 8_73_55, 45_48, 20, 47_28, 7_83_92, 17, 15_99_69, 18, 26, 2_44_91, 6_29, 15, 5_38, 2_27_04, 54_39, 15, 27_88, 2_44_91, 98_85, 15, 4_35_34, 6_05, 15, 8_14, 1_84_03, 3_32_00, 29, 15, 4_35_34, 2_44_58, 1_24_10, 1_11, 2_49_66, 8_36_69, 96_37, 14_40_68, 26, 8_50, 2_23_46, 27, 1_47, 2_49_66, 8_36_69, 8_34_90, 26, 3_91_13, 7_35, 27, 6_89, 6_56, 28_00, 13_39, 46_00, 53, 12_20_20, 11_57_85, 34, 8_16, 13_39, 4_68_87, 18, 1_47, 5_39_05, 19_51, 4_22_38, 4_11_70, 1_77_32, 8_34, 4_36, 15, 2_75_23, 9_87_33, 2_17, 1_47, 55_42, 49_81, 9_30, 1_73_47, 16, 2], [2_00_91, 6_29, 94, 8_27_86, 58, 4_90, 20, 15_28, 84, 5_39_05, 3_44, 8_05_92, 11_01_28, 1_88_22, 52_67, 13_06, 62, 15_25_37, 3_08, 79_97, 4_01, 12_44_27, 5_49, 3_54_42, 2_25, 1_09, 1_50_55, 2_57_48, 1_47, 71_19, 4_37_12, 34, 7_67, 13_53_66, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_92, 6_37_84, 11_94_66, 17, 14_78_08, 8_82_14, 18, 6_56, 81, 32, 32_96, 1_02_80, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=__lowerCamelCase,
            model_name="microsoft/xprophetnet-large-wiki100-cased",
            revision="1acad1643ddd54a44df6a1b797ada8373685d90e",
        )
| 67 |
import math
def res(x, y):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
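# Worked example (illustrative): comparing 2^10 with 10^3 without evaluating the
# powers. res(2, 10) = 10 * log10(2) ≈ 3.0103 and res(10, 3) = 3 * log10(10) = 3,
# so 2^10 (= 1024) is correctly reported as larger than 10^3 (= 1000).
#
#   assert res(2, 10) > res(10, 3)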
| 19 | 0 |
"""simple docstring"""
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase_ ( _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Tuple , _lowerCamelCase: List[str] , _lowerCamelCase: List[Any]="attention" ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = params[F"""{prefix}/layers_{i}/{layer_name}/key/kernel"""]
__lowerCamelCase : List[Any] = params[F"""{prefix}/layers_{i}/{layer_name}/out/kernel"""]
__lowerCamelCase : Optional[Any] = params[F"""{prefix}/layers_{i}/{layer_name}/query/kernel"""]
__lowerCamelCase : str = params[F"""{prefix}/layers_{i}/{layer_name}/value/kernel"""]
return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]

    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
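
# Note on the split: in T5 v1.1's gated activation the two kernels returned
# above are combined roughly as hidden = act(x @ wi_0) * (x @ wi_1) before the
# wo projection; v1.0 checkpoints ship a single wi kernel instead.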
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
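
# Copying "shared.weight" into the missing embedding slots mirrors T5's weight
# tying; v1.0 checkpoints also tie the LM head to the input embeddings, while
# v1.1 checkpoints ship a separate "decoder/logits_dense/kernel".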
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in the model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
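
# Typical invocation (all paths below are placeholders):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output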
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
) | 355 | """simple docstring"""
def get_highest_set_bit_position(number: int) -> int:
    """
    Returns the position of the highest set bit, i.e. the bit length of the number.

    >>> get_highest_set_bit_position(25)
    5
    >>> get_highest_set_bit_position(1)
    1
    >>> get_highest_set_bit_position(0)
    0
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
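
# Equivalent built-in: for non-negative ints Python's int.bit_length() returns
# the same value, e.g. (25).bit_length() == 5.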
if __name__ == "__main__":
import doctest
doctest.testmod() | 64 | 0 |
'''simple docstring'''
import argparse
CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"
def update_custom_js(version):
    """Update the stable version and the version dropdown in the doc's custom.js file."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
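
# For context (hypothetical excerpt), the script expects custom.js to contain:
#     const stableVersion = "v4.30.0"
#     const versionMapping = {
#         "": "v4.30.0 (stable)",
#         "v4.29.0": "v4.29.0",
#     }
# which is what the two `startswith` scans above locate and rewrite.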
| 31 | '''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , )
def _A ( self : int , A : List[Any] , A : Any , A : int , A : Union[str, Any] , A : Dict , A : List[Any] , A : Dict ):
_UpperCAmelCase : List[str] = BioGptModel(config=A )
model.to(A )
model.eval()
_UpperCAmelCase : Tuple = model(A , attention_mask=A )
_UpperCAmelCase : int = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A ( self : List[Any] , A : str , A : List[Any] , A : Dict , A : List[Any] , A : List[str] , A : Union[str, Any] , A : int , A : List[str] , A : Dict , ):
_UpperCAmelCase : Optional[int] = BioGptForCausalLM(config=A )
model.to(A )
model.eval()
_UpperCAmelCase : Optional[int] = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A ( self : List[Any] , A : str , A : str , A : str , A : Any , A : List[str] , *A : Optional[int] ):
_UpperCAmelCase : str = BioGptModel(config=A )
model.to(A )
model.eval()
# create attention mask
_UpperCAmelCase : List[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=A )
_UpperCAmelCase : Optional[int] = self.seq_length // 2
_UpperCAmelCase : List[Any] = 0
# first forward pass
_UpperCAmelCase , _UpperCAmelCase : List[str] = model(A , attention_mask=A ).to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCAmelCase : List[str] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
_UpperCAmelCase : List[str] = ids_tensor((1,) , A ).item() + 1
_UpperCAmelCase : str = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
_UpperCAmelCase : Any = random_other_next_tokens
# append to next input_ids and attn_mask
_UpperCAmelCase : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCAmelCase : Optional[int] = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=A )] , dim=1 , )
# get two different outputs
_UpperCAmelCase : List[Any] = model(A , attention_mask=A )["last_hidden_state"]
_UpperCAmelCase : Optional[Any] = model(A , past_key_values=A , attention_mask=A )["last_hidden_state"]
# select random slice
_UpperCAmelCase : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCAmelCase : Optional[Any] = output_from_no_past[:, -1, random_slice_idx].detach()
_UpperCAmelCase : Any = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1E-3 ) )
def _A ( self : int , A : Dict , A : str , A : Dict , A : Union[str, Any] , A : Any , *A : Union[str, Any] ):
_UpperCAmelCase : Optional[Any] = BioGptModel(config=A ).to(A ).eval()
_UpperCAmelCase : List[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=A )
# first forward pass
_UpperCAmelCase : Union[str, Any] = model(A , attention_mask=A , use_cache=A )
_UpperCAmelCase , _UpperCAmelCase : Dict = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
_UpperCAmelCase : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
_UpperCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCAmelCase : Dict = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
_UpperCAmelCase : Any = model(A , attention_mask=A )["last_hidden_state"]
_UpperCAmelCase : Dict = model(A , attention_mask=A , past_key_values=A )[
"last_hidden_state"
]
# select random slice
_UpperCAmelCase : Dict = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCAmelCase : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
_UpperCAmelCase : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1E-3 ) )
def _A ( self : Optional[Any] , A : Tuple , A : List[str] , A : Tuple , A : Dict , A : List[Any] , *A : Tuple , A : List[str]=False ):
_UpperCAmelCase : Optional[int] = BioGptForCausalLM(A )
model.to(A )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
_UpperCAmelCase : Union[str, Any] = model(A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def _A ( self : Optional[Any] , A : Any , *A : Optional[Any] ):
_UpperCAmelCase : Tuple = BioGptModel(A )
_UpperCAmelCase : int = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def _A ( self : Optional[int] , A : Dict , A : Tuple , A : Optional[int] , A : int , A : List[str] , *A : Dict ):
_UpperCAmelCase : Any = self.num_labels
_UpperCAmelCase : Any = BioGptForTokenClassification(A )
model.to(A )
model.eval()
_UpperCAmelCase : Optional[int] = model(A , attention_mask=A , token_type_ids=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
def _A ( self : Optional[Any] ):
_UpperCAmelCase : List[Any] = BioGptModelTester(self )
_UpperCAmelCase : str = ConfigTester(self , config_class=A , hidden_size=37 )
def _A ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def _A ( self : Any ):
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _A ( self : Any ):
_UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase : Tuple = type
self.model_tester.create_and_check_model(*A )
def _A ( self : int ):
_UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*A )
def _A ( self : Union[str, Any] ):
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*A , gradient_checkpointing=A )
def _A ( self : Union[str, Any] ):
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*A )
def _A ( self : Dict ):
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*A )
def _A ( self : Dict ):
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*A )
@slow
def _A ( self : List[str] ):
_UpperCAmelCase : Optional[Any] = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
model.to(A )
_UpperCAmelCase : Tuple = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
_UpperCAmelCase : str = "left"
# Define PAD Token = EOS Token = 50256
_UpperCAmelCase : Any = tokenizer.eos_token
_UpperCAmelCase : int = model.config.eos_token_id
# use different length sentences to test batching
_UpperCAmelCase : Any = [
"Hello, my dog is a little",
"Today, I",
]
_UpperCAmelCase : Tuple = tokenizer(A , return_tensors="pt" , padding=A )
_UpperCAmelCase : Optional[Any] = inputs["input_ids"].to(A )
_UpperCAmelCase : Any = model.generate(
input_ids=A , attention_mask=inputs["attention_mask"].to(A ) , )
_UpperCAmelCase : int = tokenizer(sentences[0] , return_tensors="pt" ).input_ids.to(A )
_UpperCAmelCase : List[Any] = model.generate(input_ids=A )
_UpperCAmelCase : List[Any] = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
_UpperCAmelCase : int = tokenizer(sentences[1] , return_tensors="pt" ).input_ids.to(A )
_UpperCAmelCase : int = model.generate(input_ids=A , max_length=model.config.max_length - num_paddings )
_UpperCAmelCase : Dict = tokenizer.batch_decode(A , skip_special_tokens=A )
_UpperCAmelCase : Any = tokenizer.decode(output_non_padded[0] , skip_special_tokens=A )
_UpperCAmelCase : Optional[int] = tokenizer.decode(output_padded[0] , skip_special_tokens=A )
_UpperCAmelCase : str = [
"Hello, my dog is a little bit bigger than a little bit.",
"Today, I have a good idea of how to use the information",
]
self.assertListEqual(A , A )
self.assertListEqual(A , [non_padded_sentence, padded_sentence] )
@slow
def _A ( self : str ):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Optional[Any] = BioGptModel.from_pretrained(A )
self.assertIsNotNone(A )
def _A ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : str = 3
_UpperCAmelCase : List[str] = input_dict["input_ids"]
_UpperCAmelCase : Dict = input_ids.ne(1 ).to(A )
_UpperCAmelCase : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_UpperCAmelCase : List[str] = BioGptForSequenceClassification(A )
model.to(A )
model.eval()
_UpperCAmelCase : List[str] = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _A ( self : int ):
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : int = 3
_UpperCAmelCase : Dict = "multi_label_classification"
_UpperCAmelCase : Optional[Any] = input_dict["input_ids"]
_UpperCAmelCase : Optional[int] = input_ids.ne(1 ).to(A )
_UpperCAmelCase : Tuple = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_UpperCAmelCase : Optional[Any] = BioGptForSequenceClassification(A )
model.to(A )
model.eval()
_UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@slow
def _A ( self : List[Any] ):
_UpperCAmelCase : Optional[Any] = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
_UpperCAmelCase : List[str] = torch.tensor([[2, 4805, 9, 656, 21]] )
_UpperCAmelCase : List[Any] = model(A )[0]
_UpperCAmelCase : int = 42384
_UpperCAmelCase : int = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , A )
_UpperCAmelCase : Any = torch.tensor(
[[[-9.5_236, -9.8_918, 10.4_557], [-11.0_469, -9.6_423, 8.1_022], [-8.8_664, -7.8_826, 5.5_325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , A , atol=1E-4 ) )
@slow
def _A ( self : Any ):
_UpperCAmelCase : str = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
_UpperCAmelCase : Tuple = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
model.to(A )
torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = tokenizer("COVID-19 is" , return_tensors="pt" ).to(A )
_UpperCAmelCase : Dict = model.generate(
**A , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=A , )
_UpperCAmelCase : Optional[Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=A )
_UpperCAmelCase : List[str] = (
"COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
" more than 800,000 deaths."
)
self.assertEqual(A , A )
| 31 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
    _import_structure["modeling_data2vec_text"] = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
    _import_structure["modeling_data2vec_vision"] = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
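
# The _LazyModule indirection defers the heavy torch/tensorflow imports until an
# attribute is first accessed, e.g. `from transformers.models.data2vec import
# Data2VecTextModel` only triggers the modeling_data2vec_text import at that point.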
| 358 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowercase: Any = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 1_60_00):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
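
# Worked example (illustrative numbers): at a 16 kHz sampling rate,
# max_length=20.0 keeps at most 16000 * 20 = 320000 samples; longer clips are
# cropped at a random offset so each epoch sees a different 20-second window.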
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/feature extractor we are going to fine-tune from."""

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder` "
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`. "
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    # See all possible arguments in src/transformers/training_args.py, or by
    # passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(F'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name, use_auth_token=True if model_args.use_auth_token else None, )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.eval_split_name, use_auth_token=True if model_args.use_auth_token else None, )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--audio_column_name` to the correct audio column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--label_column_name` to the correct text column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path, return_attention_mask=model_args.attention_mask, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate) )

    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch
    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch
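
    # Note: train_transforms crops a random window per call (data augmentation),
    # while val_transforms above feeds the full clip so evaluation stays deterministic.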
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        """Computes accuracy on a batch of predictions."""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task="audio-classification", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=raw_datasets["train"] if training_args.do_train else None, eval_dataset=raw_datasets["eval"] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=feature_extractor, )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main() | 31 | 0 |
"""simple docstring"""
def method_a(boundary, steps):
    # "Extended" (composite) trapezoidal rule:
    # int(f) ~= h/2 * (f(a) + 2*f(x_1) + ... + 2*f(x_{n-1}) + f(b))
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y
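
# The loop above implements the composite trapezoidal rule
#   int_a^b f(x) dx ~= h * (f(a)/2 + f(x_1) + ... + f(x_{n-1}) + f(b)/2)
# with h = (b - a) / steps; the half-weights on the endpoints are the two
# (h / 2.0) terms.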
def make_points(a, b, h):
    x = a + h
    while x <= (b - h):  # <= so the last interior point b - h is not skipped
        yield x
        x = x + h
def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y
def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f"y = {y}")
if __name__ == "__main__":
main()
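
# Sanity check (worked by hand): the exact integral of x**2 over [0, 1] is
# 1/3 ~= 0.3333; with steps = 10 the estimate printed above is ~0.3350, and
# the error shrinks as O(h**2) as steps increases.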
| 264 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]):
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein
def make_atom14_masks_np(batch: Dict[str, torch.Tensor]):
    out = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(out))
    return out
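
# Shape summary (illustrative): for a protein with num_res residues,
# protein["residx_atom14_to_atom37"] is (num_res, 14) and
# protein["residx_atom37_to_atom14"] is (num_res, 37); the *_atom_exists masks
# share those shapes and zero out slots for atoms a residue does not have.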
| 244 | 0 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})
    @require_cuda
    def test_grad_scaler_kwargs(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)
    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
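
    # For reference: a handler only forwards the fields that differ from their
    # defaults, e.g. GradScalerKwargs(init_scale=1024).to_kwargs() yields
    # {"init_scale": 1024} when 1024 is not the default, exactly the behaviour
    # the MockClass assertions above exercise.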
| 364 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
'''simple docstring'''
lowerCamelCase = XGLMConfig
lowerCamelCase = {}
lowerCamelCase = '''gelu'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=0.0_2 , ) -> List[str]:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_input_mask
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =d_model
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =ffn_dim
_lowerCAmelCase =activation_function
_lowerCAmelCase =activation_dropout
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =initializer_range
_lowerCAmelCase =None
_lowerCAmelCase =0
_lowerCAmelCase =2
_lowerCAmelCase =1
def _lowerCAmelCase ( self ) -> Dict:
return XGLMConfig.from_pretrained("""facebook/xglm-564M""" )
def _lowerCAmelCase ( self ) -> str:
_lowerCAmelCase =tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
_lowerCAmelCase =None
if self.use_input_mask:
_lowerCAmelCase =random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase =self.get_config()
_lowerCAmelCase =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _lowerCAmelCase ( self ) -> str:
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__UpperCAmelCase , )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =TFXGLMModelTester(self )
_lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase , n_embd=37 )
def _lowerCAmelCase ( self ) -> int:
self.config_tester.run_common_tests()
@slow
def _lowerCAmelCase ( self ) -> Union[str, Any]:
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase =TFXGLMModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self , __UpperCAmelCase=True ) -> str:
_lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase =tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
_lowerCAmelCase =[2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81]
# fmt: on
_lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , __UpperCAmelCase )
@slow
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
tf.random.set_seed(0 )
_lowerCAmelCase =tokenizer("""Today is a nice day and""" , return_tensors="""tf""" )
_lowerCAmelCase =tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(""":/CPU:0""" ):
_lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , seed=[7, 0] )
_lowerCAmelCase =tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =(
"""Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"""
)
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
@slow
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase ="""left"""
# use different length sentences to test batching
_lowerCAmelCase =[
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When""",
"""Hello, my dog is a little""",
]
_lowerCAmelCase =tokenizer(__UpperCAmelCase , return_tensors="""tf""" , padding=__UpperCAmelCase )
_lowerCAmelCase =inputs["""input_ids"""]
_lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 )
_lowerCAmelCase =tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
_lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 )
_lowerCAmelCase =tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
_lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 )
_lowerCAmelCase =tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =[
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """
"""a single""",
"""Hello, my dog is a little bit of a shy one, but he is very friendly""",
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence] )
| 341 | 0 |
'''simple docstring'''
def solution(length: int = 50) -> int:
    """simple docstring"""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
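# Quick sanity check (an added sketch, not part of the original file): Project Euler 114's
# statement gives exactly seventeen valid arrangements for a row of length seven.
assert solution(7) == 17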
if __name__ == "__main__":
print(f'''{solution() = }''')
| 28 |
'''simple docstring'''
def binary_insertion_sort(collection: list) -> list:
    """simple docstring"""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
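# Added usage sketch (not in the original module): the sort mutates and returns the
# same list object, so the single assert below exercises both behaviours.
assert binary_insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]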
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 28 | 1 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}'
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
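# Example invocation (an added sketch; the script file name and paths are hypothetical,
# the flags are exactly the ones registered above):
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path ./unispeech_sat.ckpt \
#       --pytorch_dump_folder_path ./unispeech_sat_hf \
#       --config_path ./config.json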
| 47 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_bridgetower''': [
'''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BridgeTowerConfig''',
'''BridgeTowerTextConfig''',
'''BridgeTowerVisionConfig''',
],
'''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
'''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BridgeTowerForContrastiveLearning''',
'''BridgeTowerForImageAndTextRetrieval''',
'''BridgeTowerForMaskedLM''',
'''BridgeTowerModel''',
'''BridgeTowerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
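# Added note (sketch of the intent, no extra machinery): with the `_LazyModule`
# registered above, a statement such as
#   from transformers.models.bridgetower import BridgeTowerModel
# resolves the name lazily, so the heavy torch-backed submodule is only imported
# when the attribute is first accessed.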
| 47 | 1 |
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    if not isinstance(number, int):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg)
    if number < 1:
        raise ValueError("""Input must be a positive integer""")
    return -1 if len(prime_factors(number)) % 2 else 1
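# Worked examples (added sketch): 10 = 2 * 5 has an even number of prime factors,
# while 8 = 2 * 2 * 2 has an odd number.
assert liouville_lambda(10) == 1
assert liouville_lambda(8) == -1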
if __name__ == "__main__":
import doctest
doctest.testmod()
| 9 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    '''simple docstring'''

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
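# Usage sketch (added; the file name is hypothetical):
#   dataset = TextDatasetReader("train.txt", split=NamedSplit("train")).read()
# The resulting dataset has a single "text" column with one row per line of input.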
| 9 | 1 |
'''simple docstring'''
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - 97] = True
        elif char.isupper():
            flag[ord(char) - 65] = True
    return all(flag)
def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26
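# Quick checks (added sketch): the default argument is itself a pangram.
assert is_pangram() and is_pangram_faster() and is_pangram_fastest()
assert not is_pangram("hello world")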
def benchmark() -> None:
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 214 |
'''simple docstring'''
def print_max_activities(start: list[int], finish: list[int]) -> None:
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
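# For the sample arrays above the greedy scan prints "0,1,3,4," — activity 2 is skipped
# because it starts (at 0) before activity 1 finishes (at 4). (Added note, not captured output.)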
| 214 | 1 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments


logger = logging.getLogger(__name__)


@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """simple docstring"""

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear", metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"}
    )
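# Usage sketch (added; this mirrors the HfArgumentParser flow the legacy seq2seq
# examples use to consume these dataclass fields):
#   parser = HfArgumentParser(Seq2SeqTrainingArguments)
#   (training_args,) = parser.parse_args_into_dataclasses()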
| 43 |
"""simple docstring"""
UpperCAmelCase__ = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
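# Added note (hedged): in the diffusers repo a table like this is autogenerated from the
# `_deps` list in setup.py (via the repo's `deps_table_update` tooling) and consumed by the
# runtime dependency-version check, so the canonical edit point is setup.py, not this dict.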
| 288 | 0 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = '''
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
'''
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 354 |
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self):
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self):
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self):
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self):
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self):
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self):
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test_add_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self):
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5)),
        )
if __name__ == "__main__":
unittest.main()
| 262 | 0 |
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self) -> str:
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer) -> int:
        input_text = """this is a test"""
        output_text = """this is a test"""
        return input_text, output_text
    def test_convert_token_and_id(self) -> int:
        token = """<pad>"""
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self) -> Any:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], """<pad>""")
        self.assertEqual(vocab_keys[1], """<unk>""")
        self.assertEqual(vocab_keys[-1], """▁eloquent""")
        self.assertEqual(len(vocab_keys), 30000)
    def test_vocab_size(self) -> Tuple:
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_rust_and_python_full_tokenizers(self) -> Union[str, Any]:
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = """I was born in 92000, and this is falsé."""

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self) -> Any:
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("""This is a test""")
        self.assertListEqual(tokens, ["""▁this""", """▁is""", """▁a""", """▁test"""])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
        self.assertListEqual(
            tokens, ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""],
        )
    def test_sequence_builders(self) -> Optional[Any]:
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("""sequence builders""")
        text_2 = tokenizer.encode("""multi-sequence build""")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
@slow
def _lowercase( self ) -> Dict:
# fmt: off
UpperCAmelCase : Tuple = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
| 265 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self) -> List[Any]:
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self) -> int:
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("""This is a test""")
        self.assertListEqual(tokens, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained(self) -> Union[str, Any]:
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = 'facebook/mbart-large-en-ro'
    src_text = [
        ' UN Chief Says There Is No Military Solution in Syria',
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    tgt_text = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
    @classmethod
    def setUpClass(cls) -> Tuple:
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="""en_XX""", tgt_lang="""ro_RO""")
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self) -> Union[str, Any]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250020 )
    def test_enro_tokenizer_batch_encode_plus(self) -> Union[str, Any]:
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self) -> List[str]:
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self) -> List[Any]:
        src_text = ["""this is gunna be a long sentence """ * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self) -> Tuple:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250026, 250001] )
    def test_special_tokens_unaffected_by_save_load(self) -> Dict:
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
    def test_batch_fairseq_parity(self) -> List[str]:
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="""pt""")
        batch["""decoder_input_ids"""] = shift_tokens_right(batch["""labels"""], self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
    def test_enro_tokenizer_prepare_batch(self) -> Union[str, Any]:
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="""pt""", )
        batch["""decoder_input_ids"""] = shift_tokens_right(batch["""labels"""], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])
    def test_seq2seq_max_length(self) -> List[str]:
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="""pt""")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="""pt""")
        labels = targets["""input_ids"""]
        batch["""decoder_input_ids"""] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
    def test_tokenizer_translation(self) -> Union[str, Any]:
        inputs = self.tokenizer._build_translation_inputs(
            """A test""", return_tensors="""pt""", src_lang="""en_XX""", tgt_lang="""ar_AR""")
        self.assertEqual(
            nested_simplify(inputs), {
                # A, test, EOS, en_XX
                """input_ids""": [[62, 3034, 2, 250004]],
                """attention_mask""": [[1, 1, 1, 1]],
                # ar_AR
                """forced_bos_token_id""": 250001,
            }, )
| 265 | 1 |
"""simple docstring"""
class PrefixSum:
    '''simple docstring'''

    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
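# Usage sketch (added): range-sum queries become O(1) after the O(n) prefix pass.
_ps = PrefixSum([1, 2, 3])
assert _ps.get_sum(0, 2) == 6
assert _ps.contains_sum(5)  # the contiguous slice 2 + 3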
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 12 |
"""simple docstring"""
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
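# Added note (sketch): on POSIX terminals an arrow key arrives as three characters —
# ESC, "[" (which is KEYMAP["mod_int"]), then one of "A".."D" — and get_character()
# below folds them back into a single flagged code by adding ARROW_KEY_FLAG to the
# final character.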
def get_character():
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 12 | 1 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
| 126 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
SCREAMING_SNAKE_CASE__ : str = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class PixaStructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Tuple ) -> Dict:
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_convert_rgb''' ) )
def __A ( self : int ) -> str:
__lowerCamelCase = self.image_processor_tester.prepare_dummy_image()
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
__lowerCamelCase = 20_48
__lowerCamelCase = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def __A ( self : Union[str, Any] ) -> Dict:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __A ( self : Dict ) -> str:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
__lowerCamelCase = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
__lowerCamelCase = '''Hello'''
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ , header_text=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ , header_text=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __A ( self : List[str] ) -> Any:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __A ( self : Tuple ) -> List[str]:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class PixaStructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Union[str, Any] ) -> List[str]:
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_convert_rgb''' ) )
def __A ( self : Optional[int] ) -> str:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 270 | 0 |
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
UpperCamelCase : Optional[int] = {
"""gwf-440k""": {
"""url""": """https://model-server.zqevans2.workers.dev/gwf-440k.ckpt""",
"""sample_rate""": 48_000,
"""sample_size""": 65_536,
},
"""jmann-small-190k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt""",
"""sample_rate""": 48_000,
"""sample_size""": 65_536,
},
"""jmann-large-580k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt""",
"""sample_rate""": 48_000,
"""sample_size""": 131_072,
},
"""maestro-uncond-150k""": {
"""url""": """https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt""",
"""sample_rate""": 16_000,
"""sample_size""": 65_536,
},
"""unlocked-uncond-250k""": {
"""url""": """https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt""",
"""sample_rate""": 16_000,
"""sample_size""": 65_536,
},
"""honk-140k""": {
"""url""": """https://model-server.zqevans2.workers.dev/honk-140k.ckpt""",
"""sample_rate""": 16_000,
"""sample_size""": 65_536,
},
}
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Union[str, Any] ) -> int:
"""simple docstring"""
return torch.atana(snake_case , snake_case ) / math.pi * 2
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> Optional[int]:
"""simple docstring"""
a : int = torch.sin(t * math.pi / 2 ) ** 2
a : Any = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(snake_case , snake_case )
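# Note (illustrative): together these two helpers implement the "crash" noise schedule
# of the original audio-diffusion repo -- a linear t is mapped to
# sigma = sin(t * pi / 2) ** 2 and alpha = sqrt(1 - sigma ** 2), then converted back
# to a timestep through the arctangent helper above.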
class UpperCamelCase ( a_ ):
"""simple docstring"""
pass
class UpperCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase_ : int):
"""simple docstring"""
super().__init__()
a : int = DiffusionAttnUnetaD(UpperCAmelCase_ , n_attn_layers=4)
a : Union[str, Any] = deepcopy(self.diffusion)
a : Tuple = torch.quasirandom.SobolEngine(1 , scramble=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] ) -> List[str]:
"""simple docstring"""
a : Optional[Any] = MODELS_MAP[model_name]['url']
os.system(F"""wget {url} ./""" )
return F"""./{model_name}.ckpt"""
UpperCamelCase : Tuple = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
}
UpperCamelCase : str = {
"""8""": """resnets.0""",
"""9""": """attentions.0""",
"""10""": """resnets.1""",
"""11""": """attentions.1""",
"""12""": """resnets.2""",
"""13""": """attentions.2""",
}
UpperCamelCase : Optional[int] = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
"""8""": """resnets.3""",
"""9""": """attentions.3""",
"""10""": """resnets.4""",
"""11""": """attentions.4""",
"""12""": """resnets.5""",
"""13""": """attentions.5""",
}
UpperCamelCase : List[Any] = {
"""0""": """resnets.0""",
"""1""": """resnets.1""",
"""2""": """resnets.2""",
"""4""": """resnets.0""",
"""5""": """resnets.1""",
"""6""": """resnets.2""",
}
UpperCamelCase : Union[str, Any] = {
"""skip""": """conv_skip""",
"""main.0""": """conv_1""",
"""main.1""": """group_norm_1""",
"""main.3""": """conv_2""",
"""main.4""": """group_norm_2""",
}
UpperCamelCase : List[str] = {
"""norm""": """group_norm""",
"""qkv_proj""": ["""query""", """key""", """value"""],
"""out_proj""": ["""proj_attn"""],
}
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] ) -> List[str]:
"""simple docstring"""
if name.startswith('skip' ):
return name.replace('skip' , RES_CONV_MAP['skip'] )
# name has to be of format main.{digit}
if not name.startswith('main.' ):
raise ValueError(F"""ResConvBlock error with {name}""" )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] ) -> int:
"""simple docstring"""
for key, value in ATTN_MAP.items():
if name.startswith(snake_case ) and not isinstance(snake_case , snake_case ):
return name.replace(snake_case , snake_case )
elif name.startswith(snake_case ):
return [name.replace(snake_case , snake_case ) for v in value]
raise ValueError(F"""Attn error with {name}""" )
def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : Tuple=13 ) -> Any:
"""simple docstring"""
a : str = input_string
if string.split('.' )[0] == "timestep_embed":
return string.replace('timestep_embed' , 'time_proj' )
a : Dict = 0
if string.startswith('net.3.' ):
depth += 1
a : Any = string[6:]
elif string.startswith('net.' ):
a : Optional[int] = string[4:]
while string.startswith('main.7.' ):
depth += 1
a : str = string[7:]
if string.startswith('main.' ):
a : Optional[Any] = string[5:]
# mid block
if string[:2].isdigit():
a : Tuple = string[:2]
a : int = string[2:]
else:
a : Any = string[0]
a : str = string[1:]
if depth == max_depth:
a : str = MID_NUM_TO_LAYER[layer_num]
a : int = 'mid_block'
elif depth > 0 and int(snake_case ) < 7:
a : int = DOWN_NUM_TO_LAYER[layer_num]
a : Optional[Any] = F"""down_blocks.{depth}"""
elif depth > 0 and int(snake_case ) > 7:
a : Any = UP_NUM_TO_LAYER[layer_num]
a : Any = F"""up_blocks.{max_depth - depth - 1}"""
elif depth == 0:
a : List[str] = DEPTH_0_TO_LAYER[layer_num]
a : List[Any] = F"""up_blocks.{max_depth - 1}""" if int(snake_case ) > 3 else 'down_blocks.0'
if not string_left.startswith('.' ):
raise ValueError(F"""Naming error with {input_string} and string_left: {string_left}.""" )
a : Optional[int] = string_left[1:]
if "resnets" in new_layer:
a : int = convert_resconv_naming(snake_case )
elif "attentions" in new_layer:
a : Any = convert_attn_naming(snake_case )
a : int = new_string_left
if not isinstance(snake_case , snake_case ):
a : Tuple = prefix + '.' + new_layer + '.' + string_left
else:
a : List[Any] = [prefix + '.' + new_layer + '.' + s for s in string_left]
return new_string
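# Illustrative examples of the mapping above: a key starting with 'timestep_embed'
# maps directly to 'time_proj', while nested 'net.3.' / 'main.7.' prefixes bump the
# depth counter that selects between down_blocks, mid_block and up_blocks.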
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> Optional[Any]:
"""simple docstring"""
a : int = {}
for k, v in state_dict.items():
if k.endswith('kernel' ):
            # up- and downsample layers don't have trainable weights
continue
a : List[Any] = rename(snake_case )
# check if we need to transform from Conv => Linear for attention
if isinstance(snake_case , snake_case ):
a : Any = transform_conv_attns(snake_case , snake_case , snake_case )
else:
a : Dict = v
return new_state_dict
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Tuple , snake_case : Optional[int] ) -> str:
"""simple docstring"""
if len(snake_case ) == 1:
if len(v.shape ) == 3:
# weight
a : int = v[:, :, 0]
else:
# bias
a : List[str] = v
else:
# qkv matrices
a : Any = v.shape[0]
a : Tuple = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
a : Tuple = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
a : str = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
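# Illustrative: for a fused qkv weight of shape (3 * C, C, 1), the loop above slices
# out three (C, C) chunks -- query, key, value -- dropping the trailing conv
# dimension so the tensors fit the Linear attention projections in diffusers.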
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] ) -> Dict:
"""simple docstring"""
a : List[Any] = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
a : Any = args.model_path.split('/' )[-1].split('.' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F"""Make sure to provide one of the official model names {MODELS_MAP.keys()}"""
a : Any = download(snake_case )
a : List[str] = MODELS_MAP[model_name]['sample_rate']
a : Optional[int] = MODELS_MAP[model_name]['sample_size']
a : Union[str, Any] = Object()
a : Any = sample_size
a : Dict = sample_rate
a : Dict = 0
a : List[str] = UNetaDModel(sample_size=snake_case , sample_rate=snake_case )
a : Any = diffusers_model.state_dict()
a : Tuple = DiffusionUncond(snake_case )
orig_model.load_state_dict(torch.load(args.model_path , map_location=snake_case )['state_dict'] )
a : int = orig_model.diffusion_ema.eval()
a : Tuple = orig_model.state_dict()
a : Optional[Any] = rename_orig_weights(snake_case )
a : Tuple = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
a : Tuple = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(snake_case ) == 0, F"""Problem with {renamed_minus_diffusers}"""
assert all(k.endswith('kernel' ) for k in list(snake_case ) ), F"""Problem with {diffusers_minus_renamed}"""
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"""
if key == "time_proj.weight":
a : str = value.squeeze()
a : Optional[int] = value
diffusers_model.load_state_dict(snake_case )
a : Union[str, Any] = 100
a : Dict = 33
a : str = IPNDMScheduler(num_train_timesteps=snake_case )
a : List[Any] = torch.manual_seed(snake_case )
a : List[str] = torch.randn([1, 2, config.sample_size] , generator=snake_case ).to(snake_case )
a : int = torch.linspace(1 , 0 , steps + 1 , device=snake_case )[:-1]
a : Tuple = get_crash_schedule(snake_case )
a : Union[str, Any] = DanceDiffusionPipeline(unet=snake_case , scheduler=snake_case )
a : Optional[Any] = torch.manual_seed(33 )
a : Optional[Any] = pipe(num_inference_steps=snake_case , generator=snake_case ).audios
a : Union[str, Any] = sampling.iplms_sample(snake_case , snake_case , snake_case , {} )
a : Union[str, Any] = generated.clamp(-1 , 1 )
a : Tuple = (generated - audio).abs().sum()
a : str = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('Diff sum' , snake_case )
print('Diff max' , snake_case )
assert diff_max < 1E-3, F"""Diff max: {diff_max} is too much :-/"""
print(F"""Conversion for {model_name} successful!""" )
if __name__ == "__main__":
UpperCamelCase : int = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
UpperCamelCase : int = parser.parse_args()
main(args)
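# Example invocation (hypothetical script name and output path; --model_path may be
# one of the official names from MODELS_MAP, e.g. gwf-440k):
#   python convert_dance_diffusion_to_diffusers.py --model_path gwf-440k --checkpoint_path ./gwf-440k-diffusers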
| 345 | '''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
UpperCamelCase : Optional[Any] = logging.getLogger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Tuple = "masked_bert"
def __init__( self : Tuple , UpperCAmelCase_ : List[Any]=3_0_5_2_2 , UpperCAmelCase_ : str=7_6_8 , UpperCAmelCase_ : Optional[Any]=1_2 , UpperCAmelCase_ : Optional[int]=1_2 , UpperCAmelCase_ : Union[str, Any]=3_0_7_2 , UpperCAmelCase_ : Union[str, Any]="gelu" , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Optional[int]=5_1_2 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : Optional[Any]=1e-12 , UpperCAmelCase_ : Dict=0 , UpperCAmelCase_ : Dict="topK" , UpperCAmelCase_ : str="constant" , UpperCAmelCase_ : Optional[Any]=0.0 , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
a : Union[str, Any] = vocab_size
a : List[Any] = hidden_size
a : List[str] = num_hidden_layers
a : Any = num_attention_heads
a : Optional[Any] = hidden_act
a : str = intermediate_size
a : Dict = hidden_dropout_prob
a : Any = attention_probs_dropout_prob
a : Any = max_position_embeddings
a : Dict = type_vocab_size
a : List[str] = initializer_range
a : int = layer_norm_eps
a : Dict = pruning_method
a : List[str] = mask_init
a : Union[str, Any] = mask_scale
| 345 | 1 |
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, ) -> None:
A_ = len(UpperCAmelCase__ )
    # If row is equal to the size of the board, it means there is a queen in
    # each row of the current board (possible_board), i.e. a complete solution
if row == n:
        # We convert possible_board, which looks like [1, 3, 0, 2], into its
        # printable form: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] )
return
    # We iterate over each column in the row to find all valid placements
for col in range(UpperCAmelCase__ ):
        # We apply what we learned previously. First we check that the current
        # board (possible_board) does not already contain this column value,
        # because a repeated value would mean a vertical collision. Then we
        # apply the two formulas we learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or 135: row + col = b.
        #
        # And we verify that the results of these two formulas do not already
        # exist in their respective collision lists (diagonal_right_collisions,
        # diagonal_left_collisions).
        #
        # If any of these checks is true there is a collision, so we continue
        # to the next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # If all checks pass, we call the DFS function again with updated inputs
depth_first_search(
[*possible_board, col], [*diagonal_right_collisions, row - col], [*diagonal_left_collisions, row + col], UpperCAmelCase__, UpperCAmelCase__, )
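# Worked example (illustrative): with possible_board = [1, 3] on a 4x4 board we are
# at row = 2. Trying col = 0: 0 is not in [1, 3], row - col = 2 is not in
# diagonal_right_collisions = [-1, -2], and row + col = 2 is not in
# diagonal_left_collisions = [1, 4], so (2, 0) is safe and we recurse.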
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> None:
A_ = []
depth_first_search([], [], [], UpperCAmelCase__, UpperCAmelCase__ )
# Print all the boards
for board in boards:
for column in board:
print(UpperCAmelCase__ )
print("""""" )
print(len(UpperCAmelCase__ ), """solutions were found.""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 162 |
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[int]:
A_ = {}
A_ = job["""started_at"""]
A_ = job["""completed_at"""]
A_ = date_parser.parse(UpperCAmelCase__ )
A_ = date_parser.parse(UpperCAmelCase__ )
A_ = round((end_datetime - start_datetime).total_seconds() / 60.0 )
A_ = start
A_ = end
A_ = duration_in_min
return job_info
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__=None ) -> Union[str, Any]:
A_ = None
if token is not None:
A_ = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''}
A_ = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
A_ = requests.get(UpperCAmelCase__, headers=UpperCAmelCase__ ).json()
A_ = {}
try:
job_time.update({job["""name"""]: extract_time_from_single_job(UpperCAmelCase__ ) for job in result["""jobs"""]} )
A_ = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(UpperCAmelCase__ ):
A_ = requests.get(url + F'''&page={i + 2}''', headers=UpperCAmelCase__ ).json()
job_time.update({job["""name"""]: extract_time_from_single_job(UpperCAmelCase__ ) for job in result["""jobs"""]} )
return job_time
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = get_job_time(args.workflow_run_id)
__lowerCamelCase = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f"""{k}: {v['duration']}""")
| 162 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : List[Any] = logging.get_logger(__name__)
def snake_case_ ( lowerCAmelCase_ : str ):
__lowercase : Optional[int] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
__lowercase : Optional[Any] = 192
__lowercase : Optional[Any] = 768
__lowercase : int = 12
__lowercase : Dict = 3
__lowercase : Tuple = [800, 1333]
__lowercase : Tuple = False
elif yolos_name == "yolos_s_dWr":
__lowercase : List[Any] = 330
__lowercase : List[str] = 14
__lowercase : Union[str, Any] = 6
__lowercase : Dict = 1320
elif "yolos_s" in yolos_name:
__lowercase : Optional[int] = 384
__lowercase : Tuple = 1536
__lowercase : str = 12
__lowercase : int = 6
elif "yolos_b" in yolos_name:
__lowercase : Optional[int] = [800, 1344]
__lowercase : str = 91
__lowercase : int = 'huggingface/label-files'
__lowercase : List[Any] = 'coco-detection-id2label.json'
__lowercase : int = json.load(open(hf_hub_download(UpperCAmelCase_ , UpperCAmelCase_ , repo_type="""dataset""" ) , """r""" ) )
__lowercase : Optional[Any] = {int(UpperCAmelCase_ ): v for k, v in idalabel.items()}
__lowercase : Union[str, Any] = idalabel
__lowercase : Union[str, Any] = {v: k for k, v in idalabel.items()}
return config
def snake_case_ ( lowerCAmelCase_ : dict , lowerCAmelCase_ : YolosConfig , lowerCAmelCase_ : bool = False ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowercase : Any = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
__lowercase : Any = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
__lowercase : Dict = in_proj_weight[: config.hidden_size, :]
__lowercase : Any = in_proj_bias[: config.hidden_size]
__lowercase : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowercase : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowercase : int = in_proj_weight[-config.hidden_size :, :]
__lowercase : List[str] = in_proj_bias[-config.hidden_size :]
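# Illustrative: for hidden_size H the fused qkv tensor has shape (3 * H, H); rows
# [0, H) become the query projection, [H, 2 * H) the key, and the last H the value.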
def snake_case_ ( lowerCAmelCase_ : str ):
if "backbone" in name:
__lowercase : Dict = name.replace("""backbone""" , """vit""" )
if "cls_token" in name:
__lowercase : Any = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "det_token" in name:
__lowercase : Optional[int] = name.replace("""det_token""" , """embeddings.detection_tokens""" )
if "mid_pos_embed" in name:
__lowercase : List[str] = name.replace("""mid_pos_embed""" , """encoder.mid_position_embeddings""" )
if "pos_embed" in name:
__lowercase : Any = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
__lowercase : str = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "blocks" in name:
__lowercase : List[Any] = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
__lowercase : Any = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
__lowercase : Dict = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
__lowercase : Optional[int] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowercase : Optional[Any] = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__lowercase : List[str] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowercase : Optional[Any] = name.replace("""mlp.fc2""" , """output.dense""" )
if "class_embed" in name:
__lowercase : Optional[Any] = name.replace("""class_embed""" , """class_labels_classifier""" )
if "bbox_embed" in name:
__lowercase : int = name.replace("""bbox_embed""" , """bbox_predictor""" )
if "vit.norm" in name:
__lowercase : Any = name.replace("""vit.norm""" , """vit.layernorm""" )
return name
def snake_case_ ( lowerCAmelCase_ : dict , lowerCAmelCase_ : YolosForObjectDetection ):
for key in orig_state_dict.copy().keys():
__lowercase : int = orig_state_dict.pop(UpperCAmelCase_ )
if "qkv" in key:
__lowercase : str = key.split(""".""" )
__lowercase : int = int(key_split[2] )
__lowercase : Tuple = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
__lowercase : Any = val[:dim, :]
__lowercase : Union[str, Any] = val[
dim : dim * 2, :
]
__lowercase : Any = val[-dim:, :]
else:
__lowercase : Optional[int] = val[:dim]
__lowercase : List[str] = val[dim : dim * 2]
__lowercase : List[Any] = val[-dim:]
else:
__lowercase : Tuple = val
return orig_state_dict
def snake_case_ ( ):
__lowercase : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowercase : List[Any] = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw )
return im
@torch.no_grad()
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : bool = False ):
__lowercase : Optional[Any] = get_yolos_config(UpperCAmelCase_ )
# load original state_dict
__lowercase : Union[str, Any] = torch.load(UpperCAmelCase_ , map_location="""cpu""" )['model']
# load 🤗 model
__lowercase : Optional[Any] = YolosForObjectDetection(UpperCAmelCase_ )
model.eval()
__lowercase : int = convert_state_dict(UpperCAmelCase_ , UpperCAmelCase_ )
model.load_state_dict(UpperCAmelCase_ )
# Check outputs on an image, prepared by YolosImageProcessor
__lowercase : Optional[Any] = 800 if yolos_name != 'yolos_ti' else 512
__lowercase : Dict = YolosImageProcessor(format="""coco_detection""" , size=UpperCAmelCase_ )
__lowercase : int = image_processor(images=prepare_img() , return_tensors="""pt""" )
__lowercase : int = model(**UpperCAmelCase_ )
__lowercase : List[str] = outputs.logits, outputs.pred_boxes
__lowercase : Dict = None, None
if yolos_name == "yolos_ti":
__lowercase : Dict = torch.tensor(
[[-39.5_022, -11.9_820, -17.6_888], [-29.9_574, -9.9_769, -17.7_691], [-42.3_281, -20.7_200, -30.6_294]] )
__lowercase : Tuple = torch.tensor(
[[0.4_021, 0.0_836, 0.7_979], [0.0_184, 0.2_609, 0.0_364], [0.1_781, 0.2_004, 0.2_095]] )
elif yolos_name == "yolos_s_200_pre":
__lowercase : Tuple = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] )
__lowercase : Tuple = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] )
elif yolos_name == "yolos_s_300_pre":
__lowercase : Optional[Any] = torch.tensor(
[[-36.2_220, -14.4_385, -23.5_457], [-35.6_970, -14.7_583, -21.3_935], [-31.5_939, -13.6_042, -16.8_049]] )
__lowercase : Tuple = torch.tensor(
[[0.7_614, 0.2_316, 0.4_728], [0.7_168, 0.4_495, 0.3_855], [0.4_996, 0.1_466, 0.9_996]] )
elif yolos_name == "yolos_s_dWr":
__lowercase : str = torch.tensor(
[[-42.8_668, -24.1_049, -41.1_690], [-34.7_456, -14.1_274, -24.9_194], [-33.7_898, -12.1_946, -25.6_495]] )
__lowercase : Tuple = torch.tensor(
[[0.5_587, 0.2_773, 0.0_605], [0.5_004, 0.3_014, 0.9_994], [0.4_999, 0.1_548, 0.9_994]] )
elif yolos_name == "yolos_base":
__lowercase : str = torch.tensor(
[[-40.6_064, -24.3_084, -32.6_447], [-55.1_990, -30.7_719, -35.5_877], [-51.4_311, -33.3_507, -35.6_462]] )
__lowercase : Dict = torch.tensor(
[[0.5_555, 0.2_794, 0.0_655], [0.9_049, 0.2_664, 0.1_894], [0.9_183, 0.1_984, 0.1_635]] )
else:
raise ValueError(F"Unknown yolos_name: {yolos_name}" )
assert torch.allclose(logits[0, :3, :3] , UpperCAmelCase_ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , UpperCAmelCase_ , atol=1e-4 )
Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ )
print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCAmelCase_ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(UpperCAmelCase_ )
if push_to_hub:
__lowercase : Any = {
'yolos_ti': 'yolos-tiny',
'yolos_s_200_pre': 'yolos-small',
'yolos_s_300_pre': 'yolos-small-300',
'yolos_s_dWr': 'yolos-small-dwr',
'yolos_base': 'yolos-base',
}
print("""Pushing to the hub...""" )
__lowercase : Optional[int] = model_mapping[yolos_name]
image_processor.push_to_hub(UpperCAmelCase_ , organization="""hustvl""" )
model.push_to_hub(UpperCAmelCase_ , organization="""hustvl""" )
if __name__ == "__main__":
lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase : str = parser.parse_args()
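    # Example invocation (hypothetical script name and paths):
    #   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre --checkpoint_path ./yolos_s_200_pre.pth --pytorch_dump_folder_path ./yolos-small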
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub) | 357 |
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str ):
__lowercase : Tuple = s.rsplit(lowerCAmelCase_ , lowerCAmelCase_ )
return new.join(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : List[Any] ):
    # encoder.embeddings are copied twice in the original FLAVA
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def snake_case_ ( lowerCAmelCase_ : int ):
__lowercase : List[str] = {}
__lowercase : Tuple = ["""group_1""", """group_2""", """group_3""", """group_4"""]
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
__lowercase : List[str] = key.replace(F"{group_key}." , F"{group_key}.group." )
if "res_path" in key:
__lowercase : List[Any] = key.replace("""res_path.""" , """res_path.path.""" )
if key.endswith(""".w""" ):
__lowercase : Union[str, Any] = rreplace(lowerCAmelCase_ , """.w""" , """.weight""" , 1 )
if key.endswith(""".b""" ):
__lowercase : Tuple = rreplace(lowerCAmelCase_ , """.b""" , """.bias""" , 1 )
__lowercase : Dict = value.float()
return upgrade
@torch.no_grad()
def snake_case_ ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Tuple=True ):
from dall_e import Encoder
__lowercase : Any = Encoder()
if os.path.exists(lowerCAmelCase_ ):
__lowercase : List[Any] = torch.load(lowerCAmelCase_ )
else:
__lowercase : List[Any] = torch.hub.load_state_dict_from_url(lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
__lowercase : int = ckpt.state_dict()
encoder.load_state_dict(lowerCAmelCase_ )
if config_path is not None:
__lowercase : Optional[int] = FlavaImageCodebookConfig.from_pretrained(lowerCAmelCase_ )
else:
__lowercase : List[str] = FlavaImageCodebookConfig()
__lowercase : Optional[Any] = FlavaImageCodebook(lowerCAmelCase_ ).eval()
__lowercase : List[Any] = encoder.state_dict()
__lowercase : Union[str, Any] = upgrade_state_dict(lowerCAmelCase_ )
hf_model.load_state_dict(lowerCAmelCase_ )
__lowercase : Dict = hf_model.state_dict()
__lowercase : Tuple = count_parameters(lowerCAmelCase_ )
__lowercase : Tuple = count_parameters(lowerCAmelCase_ )
assert torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(lowerCAmelCase_ )
else:
return hf_state_dict
if __name__ == "__main__":
lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowerCamelCase : Union[str, Any] = parser.parse_args()
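    # Example invocation (hypothetical script name and paths):
    #   python convert_flava_codebook.py --checkpoint_path ./encoder.pkl --pytorch_dump_folder_path ./flava-codebook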
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 306 | 0 |
def A_ ( _lowerCAmelCase ) -> str:
UpperCamelCase : Optional[int] = int(_lowerCAmelCase )
if decimal in (0, 1): # Exit cases for the recursion
return str(_lowerCAmelCase )
UpperCamelCase , UpperCamelCase : Dict = divmod(_lowerCAmelCase , 2 )
return binary_recursive(_lowerCAmelCase ) + str(_lowerCAmelCase )
def A_ ( _lowerCAmelCase ) -> str:
UpperCamelCase : Tuple = str(_lowerCAmelCase ).strip()
if not number:
raise ValueError("No input value was provided" )
UpperCamelCase : Optional[int] = "-" if number.startswith("-" ) else ""
UpperCamelCase : Any = number.lstrip("-" )
if not number.isnumeric():
raise ValueError("Input value is not an integer" )
return F"""{negative}0b{binary_recursive(int(_lowerCAmelCase ) )}"""
if __name__ == "__main__":
from doctest import testmod
testmod()
| 52 | '''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class UpperCAmelCase ( pl.LightningModule ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase ) -> List[str]:
super().__init__()
lowercase__ : List[str] = model
lowercase__ : Dict = 2
lowercase__ : Any = nn.Linear(self.model.config.hidden_size , self.num_labels )
def _lowerCAmelCase( self ) -> str:
pass
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
# load longformer model from model identifier
lowercase__ : Dict = LongformerModel.from_pretrained(UpperCAmelCase )
lowercase__ : List[str] = LightningModel(UpperCAmelCase )
lowercase__ : List[Any] = torch.load(UpperCAmelCase , map_location=torch.device('''cpu''' ) )
lightning_model.load_state_dict(ckpt['''state_dict'''] )
# init longformer question answering model
lowercase__ : Optional[int] = LongformerForQuestionAnswering.from_pretrained(UpperCAmelCase )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(UpperCAmelCase )
print(F"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
__a: List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--longformer_model""",
default=None,
type=str,
required=True,
help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
)
parser.add_argument(
"""--longformer_question_answering_ckpt_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch Lightning Checkpoint.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__a: Tuple = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
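    # Example invocation (hypothetical script name and paths):
    #   python convert_longformer_qa.py --longformer_model longformer-base-4096 --longformer_question_answering_ckpt_path ./checkpoint.ckpt --pytorch_dump_folder_path ./longformer-qa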
| 198 | 0 |
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
__UpperCAmelCase = logging.get_logger(__name__)
# General docstring
__UpperCAmelCase = 'PoolFormerConfig'
# Base docstring
__UpperCAmelCase = 'sail/poolformer_s12'
__UpperCAmelCase = [1, 5_12, 7, 7]
# Image classification docstring
__UpperCAmelCase = 'sail/poolformer_s12'
__UpperCAmelCase = 'tabby, tabby cat'
__UpperCAmelCase = [
'sail/poolformer_s12',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : float = 0.0 , lowercase__ : bool = False ) -> Any:
'''simple docstring'''
if drop_prob == 0.0 or not training:
return input
lowerCAmelCase_ :str = 1 - drop_prob
lowerCAmelCase_ :List[str] = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
lowerCAmelCase_ :Any = keep_prob + torch.rand(lowercase__ , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
lowerCAmelCase_ :Union[str, Any] = input.div(lowercase__ ) * random_tensor
return output
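# Note (illustrative): with drop_prob = 0.2 and a batch of 8, on average ~2 samples
# have this branch zeroed out, while survivors are rescaled by 1 / keep_prob so the
# expected activation magnitude is unchanged (per-sample stochastic depth).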
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A = None ) -> None:
super().__init__()
lowerCAmelCase_ :int = drop_prob
def __lowerCAmelCase ( self , __A ) -> torch.Tensor:
return drop_path(__A , self.drop_prob , self.training )
def __lowerCAmelCase ( self ) -> str:
return "p={}".format(self.drop_prob )
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A , __A , __A , __A , __A , __A=None ) -> Dict:
super().__init__()
lowerCAmelCase_ :List[str] = patch_size if isinstance(__A , collections.abc.Iterable ) else (patch_size, patch_size)
lowerCAmelCase_ :Any = stride if isinstance(__A , collections.abc.Iterable ) else (stride, stride)
lowerCAmelCase_ :Optional[int] = padding if isinstance(__A , collections.abc.Iterable ) else (padding, padding)
lowerCAmelCase_ :Dict = nn.Convad(__A , __A , kernel_size=__A , stride=__A , padding=__A )
lowerCAmelCase_ :Any = norm_layer(__A ) if norm_layer else nn.Identity()
def __lowerCAmelCase ( self , __A ) -> Dict:
lowerCAmelCase_ :Any = self.projection(__A )
lowerCAmelCase_ :Union[str, Any] = self.norm(__A )
return embeddings
class _SCREAMING_SNAKE_CASE ( nn.GroupNorm ):
def __init__( self , __A , **__A ) -> List[str]:
super().__init__(1 , __A , **__A )
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A ) -> Any:
super().__init__()
lowerCAmelCase_ :Union[str, Any] = nn.AvgPoolad(__A , stride=1 , padding=pool_size // 2 , count_include_pad=__A )
def __lowerCAmelCase ( self , __A ) -> Union[str, Any]:
return self.pool(__A ) - hidden_states
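# Note (illustrative): returning pool(x) - x makes this a pure token mixer; with the
# residual connection added in PoolFormerLayer, average pooling stands in for
# self-attention, following the PoolFormer design.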
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A , __A , __A , __A ) -> Union[str, Any]:
super().__init__()
lowerCAmelCase_ :str = nn.Convad(__A , __A , 1 )
lowerCAmelCase_ :List[str] = nn.Convad(__A , __A , 1 )
lowerCAmelCase_ :Tuple = PoolFormerDropPath(__A )
if isinstance(config.hidden_act , __A ):
lowerCAmelCase_ :Optional[int] = ACTaFN[config.hidden_act]
else:
lowerCAmelCase_ :Tuple = config.hidden_act
def __lowerCAmelCase ( self , __A ) -> List[str]:
lowerCAmelCase_ :Tuple = self.conva(__A )
lowerCAmelCase_ :Union[str, Any] = self.act_fn(__A )
lowerCAmelCase_ :Union[str, Any] = self.drop(__A )
lowerCAmelCase_ :Dict = self.conva(__A )
lowerCAmelCase_ :Any = self.drop(__A )
return hidden_states
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A , __A , __A , __A , __A , __A ) -> Any:
super().__init__()
lowerCAmelCase_ :Union[str, Any] = PoolFormerPooling(__A )
lowerCAmelCase_ :List[Any] = PoolFormerOutput(__A , __A , __A , __A )
lowerCAmelCase_ :Union[str, Any] = PoolFormerGroupNorm(__A )
lowerCAmelCase_ :Optional[int] = PoolFormerGroupNorm(__A )
# Useful for training neural nets
lowerCAmelCase_ :Union[str, Any] = PoolFormerDropPath(__A ) if drop_path > 0.0 else nn.Identity()
lowerCAmelCase_ :Tuple = config.use_layer_scale
if config.use_layer_scale:
lowerCAmelCase_ :str = nn.Parameter(
config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A )
lowerCAmelCase_ :List[Any] = nn.Parameter(
config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A )
def __lowerCAmelCase ( self , __A ) -> List[str]:
if self.use_layer_scale:
lowerCAmelCase_ :Tuple = self.pooling(self.before_norm(__A ) )
lowerCAmelCase_ :Union[str, Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
lowerCAmelCase_ :List[str] = hidden_states + self.drop_path(__A )
lowerCAmelCase_ :Any = ()
lowerCAmelCase_ :Optional[Any] = self.output(self.after_norm(__A ) )
lowerCAmelCase_ :int = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
lowerCAmelCase_ :Tuple = hidden_states + self.drop_path(__A )
lowerCAmelCase_ :Tuple = (output,) + outputs
return outputs
else:
lowerCAmelCase_ :Any = self.drop_path(self.pooling(self.before_norm(__A ) ) )
# First residual connection
lowerCAmelCase_ :Any = pooling_output + hidden_states
lowerCAmelCase_ :Optional[int] = ()
# Second residual connection inside the PoolFormerOutput block
lowerCAmelCase_ :Optional[int] = self.drop_path(self.output(self.after_norm(__A ) ) )
lowerCAmelCase_ :Optional[int] = hidden_states + layer_output
lowerCAmelCase_ :List[Any] = (output,) + outputs
return outputs
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A ) -> Tuple:
super().__init__()
lowerCAmelCase_ :Optional[Any] = config
# stochastic depth decay rule
lowerCAmelCase_ :Any = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
lowerCAmelCase_ :int = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
lowerCAmelCase_ :List[Any] = nn.ModuleList(__A )
# Transformer blocks
lowerCAmelCase_ :Union[str, Any] = []
lowerCAmelCase_ :int = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
lowerCAmelCase_ :Dict = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
__A , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(__A ) )
lowerCAmelCase_ :Tuple = nn.ModuleList(__A )
def __lowerCAmelCase ( self , __A , __A=False , __A=True ) -> List[Any]:
lowerCAmelCase_ :Union[str, Any] = () if output_hidden_states else None
lowerCAmelCase_ :Dict = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
lowerCAmelCase_ :int = layers
# Get patch embeddings from hidden_states
lowerCAmelCase_ :List[str] = embedding_layer(__A )
# Send the embeddings through the blocks
for _, blk in enumerate(__A ):
lowerCAmelCase_ :int = blk(__A )
lowerCAmelCase_ :int = layer_outputs[0]
if output_hidden_states:
lowerCAmelCase_ :Dict = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=__A , hidden_states=__A )
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Optional[Any] = PoolFormerConfig
UpperCAmelCase_ :Dict = "poolformer"
UpperCAmelCase_ :Union[str, Any] = "pixel_values"
UpperCAmelCase_ :Tuple = True
def __lowerCAmelCase ( self , __A ) -> Optional[Any]:
if isinstance(__A , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__A , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def __lowerCAmelCase ( self , __A , __A=False ) -> Tuple:
if isinstance(__A , __A ):
lowerCAmelCase_ :List[Any] = value
__UpperCAmelCase = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
__UpperCAmelCase = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n'
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , A__ , )
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A ) -> List[Any]:
super().__init__(__A )
lowerCAmelCase_ :Dict = config
lowerCAmelCase_ :str = PoolFormerEncoder(__A )
# Initialize weights and apply final processing
self.post_init()
def __lowerCAmelCase ( self ) -> Optional[int]:
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(__A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__A , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __lowerCAmelCase ( self , __A = None , __A = None , __A = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
lowerCAmelCase_ :List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase_ :Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
lowerCAmelCase_ :Optional[Any] = self.encoder(
__A , output_hidden_states=__A , return_dict=__A , )
lowerCAmelCase_ :Optional[Any] = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=__A , hidden_states=encoder_outputs.hidden_states , )
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A ) -> Optional[int]:
super().__init__()
lowerCAmelCase_ :str = nn.Linear(config.hidden_size , config.hidden_size )
def __lowerCAmelCase ( self , __A ) -> Optional[Any]:
lowerCAmelCase_ :List[Any] = self.dense(__A )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n " , A__ , )
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A ) -> int:
super().__init__(__A )
lowerCAmelCase_ :List[str] = config.num_labels
lowerCAmelCase_ :Tuple = PoolFormerModel(__A )
# Final norm
lowerCAmelCase_ :Optional[int] = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
lowerCAmelCase_ :Optional[int] = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __lowerCAmelCase ( self , __A = None , __A = None , __A = None , __A = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
lowerCAmelCase_ :Any = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ :List[str] = self.poolformer(
__A , output_hidden_states=__A , return_dict=__A , )
lowerCAmelCase_ :Optional[int] = outputs[0]
lowerCAmelCase_ :Dict = self.classifier(self.norm(__A ).mean([-2, -1] ) )
lowerCAmelCase_ :Tuple = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowerCAmelCase_ :str = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowerCAmelCase_ :List[Any] = """single_label_classification"""
else:
lowerCAmelCase_ :Union[str, Any] = """multi_label_classification"""
if self.config.problem_type == "regression":
lowerCAmelCase_ :Optional[Any] = MSELoss()
if self.num_labels == 1:
lowerCAmelCase_ :Optional[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowerCAmelCase_ :Optional[int] = loss_fct(__A , __A )
elif self.config.problem_type == "single_label_classification":
lowerCAmelCase_ :Union[str, Any] = CrossEntropyLoss()
lowerCAmelCase_ :Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowerCAmelCase_ :Optional[Any] = BCEWithLogitsLoss()
lowerCAmelCase_ :Optional[Any] = loss_fct(__A , __A )
if not return_dict:
lowerCAmelCase_ :List[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__A , logits=__A , hidden_states=outputs.hidden_states )
| 369 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 1 | 0 |
'''simple docstring'''
def lowercase_ ( _lowercase ) -> int:
'''simple docstring'''
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise TypeError('''Input value must be an \'int\' type''' )
lowerCamelCase_ : List[Any] = 0
while number:
position += 1
number >>= 1
return position
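# Illustrative: for number = 32 (0b100000) the loop shifts six times, so the
# function returns 6 -- the 1-indexed position of the most significant set bit.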
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318 | import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
_SCREAMING_SNAKE_CASE = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
_SCREAMING_SNAKE_CASE = {
"""allenai/led-base-16384""": 1_6_3_8_4,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowercase( ) -> List[str]:
'''simple docstring'''
UpperCamelCase = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
UpperCamelCase = bs[:]
UpperCamelCase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(UpperCamelCase_ )
cs.append(2**8 + n )
n += 1
UpperCamelCase = [chr(UpperCamelCase_ ) for n in cs]
return dict(zip(UpperCamelCase_ , UpperCamelCase_ ) )
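# Illustrative: printable bytes map to themselves (b'A' -> 'A'), while bytes outside
# the kept ranges are shifted past U+0100 -- e.g. the space byte becomes 'Ġ'.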
def lowercase( UpperCamelCase_ ) -> List[str]:
'''simple docstring'''
UpperCamelCase = set()
UpperCamelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCamelCase = char
return pairs
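# Illustrative: for the word ('h', 'e', 'l', 'l', 'o') the symbol pairs are
# ('h', 'e'), ('e', 'l'), ('l', 'l') and ('l', 'o').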
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = ["""input_ids""", """attention_mask"""]
def __init__( self : str , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str="replace" , lowerCamelCase_ : Any="<s>" , lowerCamelCase_ : List[Any]="</s>" , lowerCamelCase_ : List[Any]="</s>" , lowerCamelCase_ : str="<s>" , lowerCamelCase_ : str="<unk>" , lowerCamelCase_ : int="<pad>" , lowerCamelCase_ : List[str]="<mask>" , lowerCamelCase_ : str=False , **lowerCamelCase_ : str , ):
"""simple docstring"""
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else bos_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else eos_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else sep_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else cls_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else unk_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token
super().__init__(
errors=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , **lowerCamelCase_ , )
with open(lowerCamelCase_ , encoding="""utf-8""" ) as vocab_handle:
UpperCamelCase = json.load(lowerCamelCase_ )
UpperCamelCase = {v: k for k, v in self.encoder.items()}
UpperCamelCase = errors # how to handle errors in decoding
UpperCamelCase = bytes_to_unicode()
UpperCamelCase = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase_ , encoding="""utf-8""" ) as merges_handle:
UpperCamelCase = merges_handle.read().split("""\n""" )[1:-1]
UpperCamelCase = [tuple(merge.split() ) for merge in bpe_merges]
UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
UpperCamelCase = {}
UpperCamelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCamelCase = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return len(self.encoder )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Dict ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
UpperCamelCase = tuple(lowerCamelCase_ )
UpperCamelCase = get_pairs(lowerCamelCase_ )
if not pairs:
return token
while True:
UpperCamelCase = min(lowerCamelCase_ , key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCamelCase , UpperCamelCase = bigram
UpperCamelCase = []
UpperCamelCase = 0
while i < len(lowerCamelCase_ ):
try:
UpperCamelCase = word.index(lowerCamelCase_ , lowerCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCamelCase = j
if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCamelCase = tuple(lowerCamelCase_ )
UpperCamelCase = new_word
if len(lowerCamelCase_ ) == 1:
break
else:
UpperCamelCase = get_pairs(lowerCamelCase_ )
UpperCamelCase = """ """.join(lowerCamelCase_ )
UpperCamelCase = word
return word
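        # Illustrative: for ('h', 'e', 'l', 'l', 'o'), if ('l', 'l') is the
        # lowest-ranked (highest-priority) merge present, one pass rewrites the
        # word to ('h', 'e', 'll', 'o'), and the loop repeats until no ranked
        # pair remains.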
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Tuple ):
"""simple docstring"""
UpperCamelCase = []
for token in re.findall(self.pat , lowerCamelCase_ ):
UpperCamelCase = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) )
return bpe_tokens
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : str ):
"""simple docstring"""
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token ) )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Any ):
"""simple docstring"""
return self.decoder.get(lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str ):
"""simple docstring"""
UpperCamelCase = """""".join(lowerCamelCase_ )
UpperCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def lowerCamelCase_ ( self : int , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + """\n""" )
UpperCamelCase = 0
with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase_ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""" )
UpperCamelCase = token_index
writer.write(""" """.join(lowerCamelCase_ ) + """\n""" )
index += 1
return vocab_file, merge_file
def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1]
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ):
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=False , **lowerCamelCase_ : Any ):
"""simple docstring"""
UpperCamelCase = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()):
UpperCamelCase = """ """ + text
return (text, kwargs)
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[bool] = None , ):
"""simple docstring"""
UpperCamelCase = super()._pad(
encoded_inputs=lowerCamelCase_ , max_length=lowerCamelCase_ , padding_strategy=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , )
# Load from model defaults
if return_attention_mask is None:
UpperCamelCase = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCamelCase = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
UpperCamelCase = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase_ )
if needs_to_be_padded:
UpperCamelCase = len(lowerCamelCase_ ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCamelCase = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
UpperCamelCase = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
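
# --- Added illustration (not part of the original file): a minimal, framework-free
# sketch of what the `_pad` override above does to `global_attention_mask`. Padding
# positions receive -1 because 0 already means "local attention" for this mask.
# The helper name below is hypothetical.
def _pad_global_attention_mask_sketch(mask, target_length, padding_side="right"):
    """Pad a global attention mask to `target_length` with -1 entries."""
    difference = target_length - len(mask)
    if difference <= 0:
        return mask
    return mask + [-1] * difference if padding_side == "right" else [-1] * difference + mask


# Example: a 3-token mask padded to length 5 on the right.
assert _pad_global_attention_mask_sketch([1, 0, 0], 5) == [1, 0, 0, -1, -1]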
| 343 | 0 |
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
lowercase__ = get_logger(__name__)
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , lowercase , lowercase=None ):
_lowerCamelCase : Any = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('__' ):
setattr(self , lowercase , getattr(lowercase , lowercase ) )
_lowerCamelCase : Tuple = module._original_module if isinstance(lowercase , _PatchedModuleObj ) else module
class lowerCAmelCase__ :
'''simple docstring'''
lowerCamelCase__ = []
def __init__( self , lowercase , lowercase , lowercase , lowercase=None ):
_lowerCamelCase : int = obj
_lowerCamelCase : Dict = target
_lowerCamelCase : Optional[Any] = new
_lowerCamelCase : Dict = target.split('.' )[0]
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : Optional[int] = attrs or []
def __enter__( self ):
*_lowerCamelCase, _lowerCamelCase : Dict = self.target.split('.' )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(lowercase ) ):
try:
_lowerCamelCase : Union[str, Any] = import_module('.'.join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
_lowerCamelCase : Union[str, Any] = getattr(self.obj , lowercase )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(lowercase , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
_lowerCamelCase : List[str] = obj_attr
# patch at top level
setattr(self.obj , lowercase , _PatchedModuleObj(lowercase , attrs=self.attrs ) )
_lowerCamelCase : List[Any] = getattr(self.obj , lowercase )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(lowercase , lowercase , _PatchedModuleObj(getattr(lowercase , lowercase , lowercase ) , attrs=self.attrs ) )
_lowerCamelCase : List[str] = getattr(lowercase , lowercase )
# finally set the target attribute
setattr(lowercase , lowercase , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
_lowerCamelCase : Any = getattr(import_module('.'.join(lowercase ) ) , lowercase )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , lowercase ) is attr_value:
_lowerCamelCase : Any = getattr(self.obj , lowercase )
setattr(self.obj , lowercase , self.new )
elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open"
_lowerCamelCase : Any = globals()['__builtins__'][target_attr]
setattr(self.obj , lowercase , self.new )
else:
raise RuntimeError(F'''Tried to patch attribute {target_attr} instead of a submodule.''' )
def __exit__( self , *lowercase ):
for attr in list(self.original ):
setattr(self.obj , lowercase , self.original.pop(lowercase ) )
def A_ ( self ):
self.__enter__()
self._active_patches.append(self )
def A_ ( self ):
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__() | 12 |
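
# --- Added usage sketch (not part of the original file). Assuming a module
# `my_module` that does `import os` at top level, the context manager temporarily
# swaps `os.path.join` as seen from inside `my_module` and restores it on exit:
#
#     import my_module
#
#     def fake_join(*parts):
#         return "/".join(parts)
#
#     with patch_submodule(my_module, "os.path.join", fake_join):
#         ...  # code inside my_module now resolves os.path.join to fake_join
#     # the original os.path.join is visible again here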
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
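
# --- Added sketch (not part of the original tests): the element-wise weight check above
# can also be expressed as a single tensor comparison over the shared leading rows.
# A minimal, self-contained illustration:
#
#     old = tf.constant([[1.0, 2.0], [3.0, 4.0]])
#     new = tf.constant([[1.0, 2.0], [3.0, 4.0], [0.1, 0.2]])  # resized: one extra row
#     n = min(old.shape[0], new.shape[0])
#     assert bool(tf.reduce_all(tf.equal(old[:n], new[:n])))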
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)
        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"
        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])
        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)
        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
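
# --- Added note (not part of the original tests): why `tokenizer.padding_side = "left"`
# matters in `test_batch_generation`. Decoder-only models continue from the last token,
# so pads must sit on the left; otherwise generation would condition on pad tokens:
#
#     right padding: [Today, I, <pad>, <pad>] -> new tokens follow <pad>  (wrong)
#     left padding:  [<pad>, <pad>, Today, I] -> new tokens follow "I"    (correct)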
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""",
}
class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096,
        encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True,
        activation_function="relu", d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0,
        init_std=0.02, decoder_start_token_id=2, scale_embedding=True, router_bias=False, router_dtype="float32",
        router_ignore_padding_tokens=False, num_experts=128, expert_capacity=64, encoder_sparse_step=4,
        decoder_sparse_step=4, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, second_expert_policy="all",
        normalize_router_prob_before_dropping=False, batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2, pad_token_id=1, bos_token_id=0,
        eos_token_id=2, output_router_logits=False, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
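
# --- Added sketch (not part of the original file): `router_dtype` is validated eagerly,
# so a typo fails at construction time rather than deep inside the routing code:
#
#     config = NllbMoeConfig(num_experts=8, expert_capacity=32)
#     assert config.router_dtype == "float32"
#     try:
#         NllbMoeConfig(router_dtype="float8")  # not float32/float16/bfloat16
#     except ValueError as err:
#         print(err)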
| 102 |
"""simple docstring"""
def partition(m: int) -> int:
    """Count the integer partitions of ``m`` (OEIS A000041).

    >>> partition(5)
    7
    """
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
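
# Added example: the 7 partitions of 5 are 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1 and
# 1+1+1+1+1, so the function must return 7.
assert partition(5) == 7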
| 301 | 0 |
"""simple docstring"""
import csv

import tweepy

# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
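
# --- Added alternative (not part of the original script): tweepy's Cursor helper hides
# the manual max_id bookkeeping above. Sketch, assuming the same authenticated `api`:
#
#     for tweet in tweepy.Cursor(api.user_timeline, screen_name="FirePing32", count=200).items():
#         print(tweet.id_str, tweet.created_at)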
| 357 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"


class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))
            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
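
# --- Added sketch (not part of the original tests): the `_raise_exceptions_for_*` flags
# turn lookup failures into `None`, which is convenient for optional files:
#
#     path = cached_file("hf-internal-testing/tiny-random-bert", "added_tokens.json",
#                        _raise_exceptions_for_missing_entries=False)
#     if path is None:
#         ...  # repo has no added_tokens.json; fall back to defaults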
| 253 | 0 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_( self ) -> int:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
        tokenizer = MecabTokenizer(mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def SCREAMING_SNAKE_CASE_( self ) -> int:
try:
            tokenizer = MecabTokenizer(
                do_lower_case=False, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )
@require_sudachi
def SCREAMING_SNAKE_CASE_( self ) -> Any:
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
@require_sudachi
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def SCREAMING_SNAKE_CASE_( self ) -> str:
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] )
@require_sudachi
def SCREAMING_SNAKE_CASE_( self ) -> int:
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] )
@require_sudachi
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] )
@require_sudachi
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )
@require_sudachi
def SCREAMING_SNAKE_CASE_( self ) -> Any:
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
@require_jumanpp
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
        tokenizer = JumanppTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
        tokenizer = JumanppTokenizer(normalize_text=False)
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
        tokenizer = JumanppTokenizer(trim_whitespace=True)
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
lowerCamelCase_ = {}
for i, token in enumerate(lowercase ):
lowerCamelCase_ = i
lowerCamelCase_ = WordpieceTokenizer(vocab=lowercase , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こんにちは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは" ) , ["こん", "##ばんは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) , ["こん", "##ばんは", "[UNK]", "こんにちは"] )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])
def SCREAMING_SNAKE_CASE_( self ) -> int:
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
super().setUp()
lowerCamelCase_ = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def SCREAMING_SNAKE_CASE_( self , **lowercase ) -> Any:
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="character" , **lowercase )
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
lowerCamelCase_ = {}
for i, token in enumerate(lowercase ):
lowerCamelCase_ = i
lowerCamelCase_ = CharacterTokenizer(vocab=lowercase , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こ", "ん", "に", "ち", "は"] )
self.assertListEqual(tokenizer.tokenize("こんにちほ" ) , ["こ", "ん", "に", "ち", "[UNK]"] )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
| 19 |
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)

    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)

    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int):
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    """
    >>> sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
    [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]
    """
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
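
# --- Added design note: introsort starts as quicksort but caps the recursion depth at
# 2 * log2(n); beyond that it falls back to heapsort (O(n log n) worst case), and slices
# of at most `size_threshold` (16) elements are finished with insertion sort, which is
# fastest on tiny or nearly-sorted runs.
# Example: sort([5.0, 1.0, 4.0, 2.0, 3.0]) == [1.0, 2.0, 3.0, 4.0, 5.0]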
| 19 | 1 |
fast27_timesteps = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
smart27_timesteps = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
smart50_timesteps = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
smart100_timesteps = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
smart185_timesteps = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
super27_timesteps = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
super40_timesteps = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
super100_timesteps = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
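
# --- Added structural check (not part of the original file): every schedule above is a
# strictly decreasing list of diffusion timesteps ending at 0; the denser ones trade
# sampling speed for quality.
for _schedule in (fast27_timesteps, smart27_timesteps, smart50_timesteps, smart100_timesteps,
                  smart185_timesteps, super27_timesteps, super40_timesteps, super100_timesteps):
    assert _schedule[-1] == 0
    assert all(earlier > later for earlier, later in zip(_schedule, _schedule[1:]))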
| 221 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
def a ( self : Optional[int] ) -> Union[str, Any]:
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {"input_ids": input_ids}
return config, inputs_dict
@require_tf
class __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
snake_case__ = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
snake_case__ = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def a ( self : Dict ) -> int:
lowerCAmelCase__ = TFDPRModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def a ( self : List[Any] ) -> List[Any]:
self.config_tester.run_common_tests()
def a ( self : Tuple ) -> Dict:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*SCREAMING_SNAKE_CASE__ )
def a ( self : Union[str, Any] ) -> Optional[int]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*SCREAMING_SNAKE_CASE__ )
def a ( self : List[Any] ) -> Dict:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*SCREAMING_SNAKE_CASE__ )
@slow
def a ( self : List[str] ) -> Union[str, Any]:
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TFDPRContextEncoder.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TFDPRQuestionEncoder.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TFDPRReader.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def a ( self : Optional[Any] ) -> List[str]:
lowerCAmelCase__ = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base" )
lowerCAmelCase__ = tf.constant(
[[101, 7_592, 1_010, 2_003, 2_026, 3_899, 10_140, 1_029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
lowerCAmelCase__ = tf.constant(
[
[
0.03_236_253,
0.12_753_335,
0.16_818_509,
0.00_279_786,
0.3_896_933,
0.24_264_945,
0.2_178_971,
-0.02_335_227,
-0.08_481_959,
-0.14_324_117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 221 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=13 , _a=3 , _a=224 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Tuple:
_A : Dict = size if size is not None else {"""height""": 18, """width""": 18}
_A : Tuple = parent
_A : List[Any] = batch_size
_A : Any = num_channels
_A : Dict = image_size
_A : str = min_resolution
_A : int = max_resolution
_A : Union[str, Any] = do_resize
_A : Any = size
_A : Tuple = do_normalize
_A : int = image_mean
_A : Optional[int] = image_std
def a__ ( self ) -> str:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = ViTImageProcessor if is_vision_available() else None
def a__ ( self ) -> Optional[Any]:
_A : Optional[int] = EfficientFormerImageProcessorTester(self )
@property
def a__ ( self ) -> List[Any]:
return self.image_proc_tester.prepare_image_processor_dict()
def a__ ( self ) -> Union[str, Any]:
_A : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , """image_mean""" ) )
self.assertTrue(hasattr(_a , """image_std""" ) )
self.assertTrue(hasattr(_a , """do_normalize""" ) )
self.assertTrue(hasattr(_a , """do_resize""" ) )
self.assertTrue(hasattr(_a , """size""" ) )
def a__ ( self ) -> Tuple:
pass
def a__ ( self ) -> Tuple:
# Initialize image_processor
_A : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A : Optional[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_A : List[Any] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
_A : List[str] = image_processor(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def a__ ( self ) -> Union[str, Any]:
# Initialize image_processor
_A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_A : Union[str, Any] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
_A : Dict = image_processor(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def a__ ( self ) -> int:
# Initialize image_processor
_A : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A : str = prepare_image_inputs(self.image_proc_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_A : int = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
_A : Optional[Any] = image_processor(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
| 26 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
@slow
def a__ ( self ) -> Any:
_A : Tuple = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
_A : List[Any] = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.intaa , ) # "J'aime le camembert !" (French: "I love camembert!")
_A : List[str] = model(_a )["""last_hidden_state"""]
_A : Union[str, Any] = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , _a )
# compare the actual values for a slice.
_A : List[Any] = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 26 | 1 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__A : int = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase ( _UpperCAmelCase ):
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
super().__init__()
self.register_modules(
vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , )
def a_ ( self , SCREAMING_SNAKE_CASE_ = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ )
def a_ ( self ):
self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 512 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Union[str, Any] = 1
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : int = len(SCREAMING_SNAKE_CASE_ )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE_ )}' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(SCREAMING_SNAKE_CASE_ )}.' )
# get prompt text embeddings
UpperCamelCase : Optional[Any] = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
UpperCamelCase : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
UpperCamelCase : str = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
UpperCamelCase : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = text_embeddings.shape
UpperCamelCase : Optional[Any] = text_embeddings.repeat(1 , SCREAMING_SNAKE_CASE_ , 1 )
UpperCamelCase : Optional[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase : Dict = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase : List[str]
if negative_prompt is None:
UpperCamelCase : Any = [""""""]
elif type(SCREAMING_SNAKE_CASE_ ) is not type(SCREAMING_SNAKE_CASE_ ):
raise TypeError(
f'`negative_prompt` should be the same type to `prompt`, but got {type(SCREAMING_SNAKE_CASE_ )} !='
f' {type(SCREAMING_SNAKE_CASE_ )}.' )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[Any] = [negative_prompt]
elif batch_size != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
f'`negative_prompt`: {negative_prompt} has batch size {len(SCREAMING_SNAKE_CASE_ )}, but `prompt`:'
f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
""" the batch size of `prompt`.""" )
else:
UpperCamelCase : str = negative_prompt
UpperCamelCase : Tuple = text_input_ids.shape[-1]
UpperCamelCase : Dict = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , )
UpperCamelCase : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase : Optional[Any] = uncond_embeddings.shape[1]
UpperCamelCase : str = uncond_embeddings.repeat(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 )
UpperCamelCase : str = uncond_embeddings.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase : Union[str, Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase : Optional[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
UpperCamelCase : List[str] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCamelCase : Any = torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device="""cpu""" , dtype=SCREAMING_SNAKE_CASE_ ).to(self.device )
UpperCamelCase : Tuple = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device="""cpu""" , dtype=SCREAMING_SNAKE_CASE_ ).to(
self.device )
else:
UpperCamelCase : Any = torch.randn(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
UpperCamelCase : Tuple = latents_reference.to(self.device )
UpperCamelCase : Union[str, Any] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
UpperCamelCase : str = (latents_shape[3] - latents_shape_reference[3]) // 2
UpperCamelCase : Union[str, Any] = (latents_shape[2] - latents_shape_reference[2]) // 2
UpperCamelCase : Tuple = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
UpperCamelCase : Optional[int] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
UpperCamelCase : int = 0 if dx < 0 else dx
UpperCamelCase : Union[str, Any] = 0 if dy < 0 else dy
UpperCamelCase : List[Any] = max(-dx , 0 )
UpperCamelCase : Union[str, Any] = max(-dy , 0 )
UpperCamelCase : List[str] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCamelCase : List[Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase : Dict = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase : Any = {}
if accepts_eta:
UpperCamelCase : List[Any] = eta
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase : Tuple = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# predict the noise residual
UpperCamelCase : int = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCamelCase , UpperCamelCase : Tuple = noise_pred.chunk(2 )
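# classifier-free guidance rule: eps = eps_uncond + guidance_scale * (eps_text - eps_uncond)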
UpperCamelCase : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase : Dict = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
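# divide out the VAE latent scaling factor (0.18215) before decoding the latents to image space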
UpperCamelCase : str = 1 / 0.18215 * latents
UpperCamelCase : Dict = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample
UpperCamelCase : Optional[int] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
UpperCamelCase : Any = self.feature_extractor(self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) , return_tensors="""pt""" ).to(
self.device )
UpperCamelCase , UpperCamelCase : Optional[int] = self.safety_checker(
images=SCREAMING_SNAKE_CASE_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
UpperCamelCase : List[str] = None
if output_type == "pil":
UpperCamelCase : List[Any] = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_ , nsfw_content_detected=SCREAMING_SNAKE_CASE_ )
| 27 |
"""simple docstring"""
def A_ ( snake_case_ : list[int] ):
'''simple docstring'''
if not numbers:
return 0
if not isinstance(snake_case_ ,(list, tuple) ) or not all(
isinstance(snake_case_ ,snake_case_ ) for number in numbers ):
raise ValueError("""numbers must be an iterable of integers""" )
UpperCamelCase : int = numbers[0]
for i in range(1 ,len(snake_case_ ) ):
# update the maximum and minimum subarray products
UpperCamelCase : List[str] = numbers[i]
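# multiplying by a negative number swaps which running product (min or max) is extreme, so swap them first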
if number < 0:
UpperCamelCase , UpperCamelCase : Optional[int] = min_till_now, max_till_now
UpperCamelCase : Dict = max(snake_case_ ,max_till_now * number )
UpperCamelCase : Union[str, Any] = min(snake_case_ ,min_till_now * number )
# update the maximum product found till now
UpperCamelCase : Union[str, Any] = max(snake_case_ ,snake_case_ )
return max_prod
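# e.g. for numbers = [2, 3, -2, 4] the maximum product subarray is [2, 3] with product 6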
| 27 | 1 |
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( _lowercase : list[int] , _lowercase : list[int] , _lowercase : int ) ->tuple[float, list[float]]:
'''simple docstring'''
a : Tuple = list(range(len(_lowercase ) ) )
a : Tuple = [v / w for v, w in zip(_lowercase , _lowercase )]
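# greedy choice: visit items in decreasing value-to-weight ratio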
index.sort(key=lambda _lowercase : ratio[i] , reverse=_lowercase )
a : float = 0
a : list[float] = [0] * len(_lowercase )
for i in index:
if weight[i] <= capacity:
a : List[str] = 1
max_value += value[i]
capacity -= weight[i]
else:
a : Optional[int] = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
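# greedy example: with values [60, 100, 120], weights [10, 20, 30] and capacity 50,
# the fill yields total value 240.0 (first two items taken whole, 2/3 of the third)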
if __name__ == "__main__":
import doctest
doctest.testmod()
| 105 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def _SCREAMING_SNAKE_CASE ( _lowercase : int = 8 ) ->str:
'''simple docstring'''
a : List[Any] = ascii_letters + digits + punctuation
return "".join(secrets.choice(_lowercase ) for _ in range(_lowercase ) )
def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : int ) ->str:
'''simple docstring'''
i -= len(_lowercase )
a : List[str] = i // 3
a : Any = i % 3
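# after reserving the required characters, split the remaining length roughly into thirds:
# letters, digits, punctuation (the remainder goes to the letters share)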
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
a : int = (
chars_incl
+ random(_lowercase , quotient + remainder )
+ random(_lowercase , _lowercase )
+ random(_lowercase , _lowercase )
)
a : List[str] = list(_lowercase )
shuffle(_lowercase )
return "".join(_lowercase )
# random is a generalised function for letters, characters and numbers
def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : int ) ->str:
'''simple docstring'''
return "".join(secrets.choice(_lowercase ) for _ in range(_lowercase ) )
def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] , _lowercase : int ) ->List[str]:
'''simple docstring'''
pass # Put your code here...
def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[Any] , _lowercase : Optional[int] ) ->int:
'''simple docstring'''
pass # Put your code here...
def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : Optional[Any] ) ->Any:
'''simple docstring'''
pass # Put your code here...
def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : int = 8 ) ->bool:
'''simple docstring'''
if len(_lowercase ) < min_length:
# Your Password must be at least 8 characters long
return False
a : List[str] = any(char in ascii_uppercase for char in password )
a : Optional[int] = any(char in ascii_lowercase for char in password )
a : List[str] = any(char in digits for char in password )
a : int = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def _SCREAMING_SNAKE_CASE ( ) ->Union[str, Any]:
'''simple docstring'''
a : Dict = int(input("Please indicate the max length of your password: " ).strip() )
a : str = input(
"Please indicate the characters that must be in your password: " ).strip()
print("Password generated:" , password_generator(_lowercase ) )
print(
"Alternative Password generated:" , alternative_password_generator(_lowercase , _lowercase ) , )
print("[If you are thinking of using this passsword, You better save it.]" )
if __name__ == "__main__":
main()
| 105 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : List[str] = logging.get_logger(__name__)
__A : Optional[Any] = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class __UpperCamelCase ( lowercase__ ):
lowercase : Any = 'vivit'
def __init__( self :Any ,_UpperCamelCase :Optional[Any]=2_2_4 ,_UpperCamelCase :List[Any]=3_2 ,_UpperCamelCase :List[str]=[2, 1_6, 1_6] ,_UpperCamelCase :Tuple=3 ,_UpperCamelCase :Optional[int]=7_6_8 ,_UpperCamelCase :Tuple=1_2 ,_UpperCamelCase :Tuple=1_2 ,_UpperCamelCase :Optional[int]=3_0_7_2 ,_UpperCamelCase :List[Any]="gelu_fast" ,_UpperCamelCase :Optional[Any]=0.0 ,_UpperCamelCase :Union[str, Any]=0.0 ,_UpperCamelCase :str=0.02 ,_UpperCamelCase :Tuple=1E-0_6 ,_UpperCamelCase :Optional[int]=True ,**_UpperCamelCase :Tuple ,):
snake_case_ : Dict = hidden_size
snake_case_ : List[str] = num_hidden_layers
snake_case_ : Any = num_attention_heads
snake_case_ : Optional[Any] = intermediate_size
snake_case_ : Tuple = hidden_act
snake_case_ : str = hidden_dropout_prob
snake_case_ : str = attention_probs_dropout_prob
snake_case_ : int = initializer_range
snake_case_ : Dict = layer_norm_eps
snake_case_ : Dict = image_size
snake_case_ : Union[str, Any] = num_frames
snake_case_ : Union[str, Any] = tubelet_size
snake_case_ : Optional[int] = num_channels
snake_case_ : Optional[Any] = qkv_bias
super().__init__(**_UpperCamelCase )
| 8 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__A : Tuple = logging.get_logger(__name__)
class __UpperCamelCase ( lowercase__ ):
lowercase : str = ['input_values', 'padding_mask']
def __init__( self :Optional[int] ,_UpperCamelCase :int = 1 ,_UpperCamelCase :int = 2_4_0_0_0 ,_UpperCamelCase :float = 0.0 ,_UpperCamelCase :float = None ,_UpperCamelCase :float = None ,**_UpperCamelCase :List[Any] ,):
super().__init__(feature_size=_UpperCamelCase ,sampling_rate=_UpperCamelCase ,padding_value=_UpperCamelCase ,**_UpperCamelCase )
snake_case_ : Dict = chunk_length_s
snake_case_ : str = overlap
@property
def a__ ( self :Any ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def a__ ( self :List[str] ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
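# hop between successive chunks: a fraction `overlap` of each chunk is shared with the previous one (at least 1 sample)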
return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self :Optional[Any] ,_UpperCamelCase :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_UpperCamelCase :Optional[Union[bool, str, PaddingStrategy]] = None ,_UpperCamelCase :Optional[bool] = False ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :Optional[Union[str, TensorType]] = None ,_UpperCamelCase :Optional[int] = None ,):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
if padding and truncation:
raise ValueError("""Both padding and truncation were set. Make sure you only set one.""" )
elif padding is None:
# by default let's pad the inputs
snake_case_ : Tuple = True
snake_case_ : str = bool(
isinstance(_UpperCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) )
if is_batched:
snake_case_ : Any = [np.asarray(_UpperCamelCase ,dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(_UpperCamelCase ,np.ndarray ):
snake_case_ : Optional[int] = np.asarray(_UpperCamelCase ,dtype=np.floataa )
elif isinstance(_UpperCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
snake_case_ : List[str] = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
snake_case_ : Optional[Any] = [np.asarray(_UpperCamelCase ).T]
# verify inputs are valid
for idx, example in enumerate(_UpperCamelCase ):
if example.ndim > 2:
raise ValueError(F'''Expected input shape (channels, length) but got shape {example.shape}''' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F'''Expected mono audio but example has {example.shape[-1]} channels''' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F'''Expected stereo audio but example has {example.shape[-1]} channels''' )
snake_case_ : Tuple = None
snake_case_ : Optional[Any] = BatchFeature({"""input_values""": raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
snake_case_ : Union[str, Any] = min(array.shape[0] for array in raw_audio )
snake_case_ : Dict = int(np.floor(max_length / self.chunk_stride ) )
snake_case_ : Union[str, Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
snake_case_ : Any = max(array.shape[0] for array in raw_audio )
snake_case_ : List[Any] = int(np.ceil(max_length / self.chunk_stride ) )
snake_case_ : Any = (nb_step - 1) * self.chunk_stride + self.chunk_length
snake_case_ : Union[str, Any] = """max_length"""
else:
snake_case_ : int = input_values
# normal padding on batch
if padded_inputs is None:
snake_case_ : Optional[int] = self.pad(
_UpperCamelCase ,max_length=_UpperCamelCase ,truncation=_UpperCamelCase ,padding=_UpperCamelCase ,return_attention_mask=_UpperCamelCase ,)
if padding:
snake_case_ : Tuple = padded_inputs.pop("""attention_mask""" )
snake_case_ : Optional[int] = []
for example in padded_inputs.pop("""input_values""" ):
if self.feature_size == 1:
snake_case_ : Dict = example[..., None]
input_values.append(example.T )
snake_case_ : List[Any] = input_values
if return_tensors is not None:
snake_case_ : Tuple = padded_inputs.convert_to_tensors(_UpperCamelCase )
return padded_inputs
| 8 | 1 |
'''simple docstring'''
def a__ ( lowerCAmelCase__ ) -> int:
UpperCAmelCase__ : Tuple = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
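# e.g. 1234 -> 1 + 2 + 3 + 4 = 10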
def a__ ( lowerCAmelCase__ = 1_00 ) -> int:
UpperCAmelCase__ : Dict = 1
UpperCAmelCase__ : str = 2
for i in range(2 , max_n + 1 ):
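# continued fraction of e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]: every third partial quotient is 2 * i // 3, the rest are 1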
UpperCAmelCase__ : Tuple = pre_numerator
UpperCAmelCase__ : Tuple = 2 * i // 3 if i % 3 == 0 else 1
UpperCAmelCase__ : str = cur_numerator
UpperCAmelCase__ : List[str] = e_cont * pre_numerator + temp
return sum_digits(lowerCAmelCase__ )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 181 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase_ ( __a ):
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_A , '''embed_dim''' ) )
self.parent.assertTrue(hasattr(_A , '''num_heads''' ) )
class lowerCamelCase_ :
def __init__( self : int , _A : Tuple , _A : Any=13 , _A : Optional[int]=64 , _A : Optional[Any]=3 , _A : List[str]=[16, 48, 96] , _A : int=[1, 3, 6] , _A : Optional[int]=[1, 2, 10] , _A : int=[7, 3, 3] , _A : Union[str, Any]=[4, 2, 2] , _A : Dict=[2, 1, 1] , _A : Optional[Any]=[2, 2, 2] , _A : Optional[Any]=[False, False, True] , _A : List[Any]=[0.0, 0.0, 0.0] , _A : str=0.0_2 , _A : Tuple=1e-12 , _A : Union[str, Any]=True , _A : Optional[Any]=True , _A : Optional[int]=2 , ):
'''simple docstring'''
UpperCAmelCase__ : Dict = parent
UpperCAmelCase__ : List[str] = batch_size
UpperCAmelCase__ : Optional[int] = image_size
UpperCAmelCase__ : List[str] = patch_sizes
UpperCAmelCase__ : Any = patch_stride
UpperCAmelCase__ : Tuple = patch_padding
UpperCAmelCase__ : int = is_training
UpperCAmelCase__ : Dict = use_labels
UpperCAmelCase__ : List[Any] = num_labels
UpperCAmelCase__ : Optional[Any] = num_channels
UpperCAmelCase__ : Optional[int] = embed_dim
UpperCAmelCase__ : int = num_heads
UpperCAmelCase__ : Any = stride_kv
UpperCAmelCase__ : str = depth
UpperCAmelCase__ : List[Any] = cls_token
UpperCAmelCase__ : List[Any] = attention_drop_rate
UpperCAmelCase__ : Optional[int] = initializer_range
UpperCAmelCase__ : Optional[int] = layer_norm_eps
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Any = None
if self.use_labels:
# create a random int32 tensor of given shape
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase__ : List[Any] = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self : Any ):
'''simple docstring'''
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def lowercase_ ( self : Optional[int] , _A : List[Any] , _A : Tuple , _A : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = TFCvtModel(config=_A )
UpperCAmelCase__ : List[str] = model(_A , training=_A )
UpperCAmelCase__ : int = (self.image_size, self.image_size)
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
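# standard convolution output size: floor((n + 2 * padding - kernel_size) / stride) + 1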
UpperCAmelCase__ : Union[str, Any] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
UpperCAmelCase__ : Optional[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def lowercase_ ( self : Optional[Any] , _A : Optional[Any] , _A : List[Any] , _A : int ):
'''simple docstring'''
UpperCAmelCase__ : str = self.num_labels
UpperCAmelCase__ : Union[str, Any] = TFCvtForImageClassification(_A )
UpperCAmelCase__ : Any = model(_A , labels=_A , training=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = config_and_inputs
UpperCAmelCase__ : Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
lowerCAmelCase__ = (
{'feature-extraction': TFCvtModel, 'image-classification': TFCvtForImageClassification}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = TFCvtModelTester(self )
UpperCAmelCase__ : Tuple = TFCvtConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def lowercase_ ( self : Any ):
'''simple docstring'''
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='''Cvt does not output attentions''' )
def lowercase_ ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def lowercase_ ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason='''Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8''' )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = tf.keras.mixed_precision.Policy('''mixed_float16''' )
tf.keras.mixed_precision.set_global_policy(_A )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy('''float32''' )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : str = model_class(_A )
UpperCAmelCase__ : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : List[Any] = [*signature.parameters.keys()]
UpperCAmelCase__ : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def lowercase_ ( self : Any ):
'''simple docstring'''
def check_hidden_states_output(_A : Dict , _A : Optional[Any] , _A : Dict ):
UpperCAmelCase__ : str = model_class(_A )
UpperCAmelCase__ : List[str] = model(**self._prepare_for_class(_A , _A ) )
UpperCAmelCase__ : Tuple = outputs.hidden_states
UpperCAmelCase__ : int = len(self.model_tester.depth )
self.assertEqual(len(_A ) , _A )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Tuple = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : List[str] = True
check_hidden_states_output(_A , _A , _A )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Optional[int] = TFCvtModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def a__ ( ) -> Any:
UpperCAmelCase__ : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Dict = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCAmelCase__ : Union[str, Any] = self.default_image_processor
UpperCAmelCase__ : Optional[Any] = prepare_img()
UpperCAmelCase__ : Tuple = image_processor(images=_A , return_tensors='''tf''' )
# forward pass
UpperCAmelCase__ : Optional[Any] = model(**_A )
# verify the logits
UpperCAmelCase__ : Union[str, Any] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , _A )
UpperCAmelCase__ : Union[str, Any] = tf.constant([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _A , atol=1e-4 ) )
| 181 | 1 |
class lowercase__ :
def __init__( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = name
SCREAMING_SNAKE_CASE : Optional[int] = value
SCREAMING_SNAKE_CASE : List[Any] = weight
def __repr__( self : List[Any] ):
'''simple docstring'''
return f"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
def __A ( self : str ):
'''simple docstring'''
return self.value
def __A ( self : List[Any] ):
'''simple docstring'''
return self.name
def __A ( self : Dict ):
'''simple docstring'''
return self.weight
def __A ( self : List[Any] ):
'''simple docstring'''
return self.value / self.weight
def A ( _lowercase , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : Optional[int] = []
for i in range(len(_lowercase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def A ( _lowercase , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : Optional[Any] = sorted(_lowercase , key=_lowercase , reverse=_lowercase )
SCREAMING_SNAKE_CASE : Any = []
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = 0.0, 0.0
for i in range(len(_lowercase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def A ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 359 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def A ( _lowercase , _lowercase = "cpu" , _lowercase = None ):
SCREAMING_SNAKE_CASE : Optional[int] = torch.load(_lowercase , map_location=_lowercase )
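# cast every tensor in the checkpoint to fp16; a non-tensor value means the file is not a plain state dict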
for k, v in tqdm(state_dict.items() ):
if not isinstance(_lowercase , torch.Tensor ):
raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
SCREAMING_SNAKE_CASE : List[Any] = v.half()
if save_path is None: # overwrite src_path
SCREAMING_SNAKE_CASE : str = src_path
torch.save(_lowercase , _lowercase )
if __name__ == "__main__":
fire.Fire(convert)
| 258 | 0 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple=0.01 , SCREAMING_SNAKE_CASE_ : int=1_0_0_0 ) -> Tuple:
lowercase_ = p_stop
lowercase_ = max_length
def __iter__( self : List[Any] ) -> Dict:
lowercase_ = 0
lowercase_ = False
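# yield consecutive integers until a random coin flip (probability p_stop per step) or max_length stops the stream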
while not stop and count < self.max_length:
yield count
count += 1
lowercase_ = random.random() < self.p_stop
class lowercase__( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict=False , SCREAMING_SNAKE_CASE_ : Dict=True ) -> List[str]:
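# materialize one BatchSamplerShard per simulated process (2 here) and compare against the expected batches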
lowercase_ = [
BatchSamplerShard(SCREAMING_SNAKE_CASE_ , 2 , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
for i in range(2 )
]
lowercase_ = [list(SCREAMING_SNAKE_CASE_ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(SCREAMING_SNAKE_CASE_ ) for shard in batch_sampler_shards] , [len(SCREAMING_SNAKE_CASE_ ) for e in expected] )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Dict ) -> Optional[Any]:
# Check the shards when the dataset is a round multiple of total batch size.
lowercase_ = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowercase_ = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [0, 1, 2]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size but the number of batches
# is a multiple of num_processes.
lowercase_ = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 0, 1]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size and the number of batches
# is not a multiple of num_processes.
lowercase_ = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [1, 2, 3]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
lowercase_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Tuple ) -> List[str]:
# Check the shards when the dataset is a round multiple of batch size.
lowercase_ = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size.
lowercase_ = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [0, 1]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowercase_ = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [1, 2]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
lowercase_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Dict ) -> List[Any]:
# Check the shards when the dataset is a round multiple of total batch size.
lowercase_ = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
# Expected shouldn't change
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowercase_ = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size but the number of batches
# is a multiple of num_processes.
lowercase_ = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is not a round multiple of batch size and the number of batches
# is not a multiple of num_processes.
lowercase_ = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
# Check the shards when the dataset is very small.
lowercase_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [[[0, 1]], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
lowercase_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=SCREAMING_SNAKE_CASE_ )
lowercase_ = [[], []]
self.check_batch_sampler_shards(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , even_batches=SCREAMING_SNAKE_CASE_ )
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
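
# --- Illustrative sketch (added; not part of the original test file). ---
# What the sharding tests above assert, in isolation: `BatchSamplerShard` hands each
# process every `num_processes`-th batch of the wrapped sampler. The expected values
# in the comment are an assumption matching the patterns checked by the tests.
def _example_batch_sampler_shard():
    batch_sampler = BatchSampler(range(8), batch_size=2, drop_last=False)
    shards = [BatchSamplerShard(batch_sampler, 2, i) for i in range(2)]
    # shard 0 -> [[0, 1], [4, 5]]; shard 1 -> [[2, 3], [6, 7]]
    return [list(shard) for shard in shards]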
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = 42
lowercase = 42
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
@torch.no_grad()
def __call__( self , __UpperCAmelCase = 1 , __UpperCAmelCase = 2000 , __UpperCAmelCase = None , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = self.unet.config.sample_size
__UpperCamelCase = (batch_size, 3, img_size, img_size)
__UpperCamelCase = self.unet
__UpperCamelCase = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase ) * self.scheduler.init_noise_sigma
__UpperCamelCase = sample.to(self.device )
self.scheduler.set_timesteps(__UpperCAmelCase )
self.scheduler.set_sigmas(__UpperCAmelCase )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__UpperCamelCase = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
__UpperCamelCase = self.unet(__UpperCAmelCase , __UpperCAmelCase ).sample
__UpperCamelCase = self.scheduler.step_correct(__UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample
# prediction step
__UpperCamelCase = model(__UpperCAmelCase , __UpperCAmelCase ).sample
__UpperCamelCase = self.scheduler.step_pred(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase )
__UpperCamelCase , __UpperCamelCase = output.prev_sample, output.prev_sample_mean
__UpperCamelCase = sample_mean.clamp(0 , 1 )
__UpperCamelCase = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__UpperCamelCase = self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=__UpperCAmelCase )
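
# --- Illustrative usage sketch (added; not part of the original module). ---
# Running the pipeline end to end. The checkpoint name is an assumption for
# demonstration; any ScoreSDE-VE checkpoint with a matching UNet works the same way.
#
#     from diffusers import ScoreSdeVePipeline
#
#     pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#     image = pipe(num_inference_steps=2000).images[0]
#     image.save("sde_ve_sample.png")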
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class snake_case__ ( snake_case_ ):
_snake_case : Optional[int] = (DPMSolverSinglestepScheduler,)
_snake_case : Optional[Any] = (("""num_inference_steps""", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
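
# --- Illustrative sketch (added; not part of the original test file). ---
# The denoising loop that `full_loop` exercises, in isolation: configure the
# scheduler, set the step count, then repeatedly call `step`. The random tensor
# below is a stand-in assumption for a real diffusion model's output.
def _example_singlestep_loop():
    scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        model_output = torch.randn_like(sample)  # stand-in for a UNet prediction
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample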
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
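
# --- Illustrative note (added; not part of the original module). ---
# With the lazy structure above, importing the package stays cheap: the heavy
# torch/TF modeling modules are only imported when an attribute is first accessed.
#
#     import transformers
#
#     cls = transformers.FunnelForSequenceClassification  # triggers the real import here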
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        filepath = os.path.join(self.tmpdirname, "file.npz")
        np.savez(filepath, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=filepath)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
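
# --- Illustrative usage sketch (added; not part of the original test file). ---
# Typical use of the processor outside the tests; the checkpoint and voice preset
# mirror the fixtures above and are assumptions for illustration.
def _example_bark_processor():
    processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
    inputs = processor("This is a test string", voice_preset="en_speaker_1")
    return inputs["input_ids"].shape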
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
__a = self.get_tokenizer()
__a = self.get_rust_tokenizer()
__a = self.get_image_processor()
__a = OwlViTProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE)
processor_slow.save_pretrained(self.tmpdirname)
__a = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE)
__a = OwlViTProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE)
processor_fast.save_pretrained(self.tmpdirname)
__a = OwlViTProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , __SCREAMING_SNAKE_CASE)
self.assertIsInstance(processor_fast.tokenizer , __SCREAMING_SNAKE_CASE)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , __SCREAMING_SNAKE_CASE)
self.assertIsInstance(processor_fast.image_processor , __SCREAMING_SNAKE_CASE)
    def test_save_load_pretrained_additional_features(self):
__a = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
__a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
__a = self.get_image_processor(do_normalize=__SCREAMING_SNAKE_CASE)
__a = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__SCREAMING_SNAKE_CASE)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , __SCREAMING_SNAKE_CASE)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE)
    def test_image_processor(self):
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = OwlViTProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE)
__a = self.prepare_image_inputs()
__a = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''np''')
__a = processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''np''')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
    def test_tokenizer(self):
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = OwlViTProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE)
__a = '''lower newer'''
__a = processor(text=__SCREAMING_SNAKE_CASE , return_tensors='''np''')
__a = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='''np''')
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist())
    def test_processor(self):
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = OwlViTProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE)
__a = '''lower newer'''
__a = self.prepare_image_inputs()
__a = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE)
self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''pixel_values'''])
# test if it raises when no input is passed
with pytest.raises(__SCREAMING_SNAKE_CASE):
processor()
    def test_processor_with_text_list(self):
__a = '''google/owlvit-base-patch32'''
__a = OwlViTProcessor.from_pretrained(__SCREAMING_SNAKE_CASE)
__a = ['''cat''', '''nasa badge''']
__a = processor(text=__SCREAMING_SNAKE_CASE)
__a = 16
self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''attention_mask'''])
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length))
# test if it raises when no input is passed
with pytest.raises(__SCREAMING_SNAKE_CASE):
processor()
    def test_processor_with_nested_text_list(self):
__a = '''google/owlvit-base-patch32'''
__a = OwlViTProcessor.from_pretrained(__SCREAMING_SNAKE_CASE)
__a = [['''cat''', '''nasa badge'''], ['''person''']]
__a = processor(text=__SCREAMING_SNAKE_CASE)
__a = 16
__a = len(__SCREAMING_SNAKE_CASE)
__a = max([len(__SCREAMING_SNAKE_CASE) for texts in input_texts])
self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''attention_mask'''])
self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length))
# test if it raises when no input is passed
with pytest.raises(__SCREAMING_SNAKE_CASE):
processor()
    def test_processor_case(self):
__a = '''google/owlvit-base-patch32'''
__a = OwlViTProcessor.from_pretrained(__SCREAMING_SNAKE_CASE)
__a = ['''cat''', '''nasa badge''']
__a = processor(text=__SCREAMING_SNAKE_CASE)
__a = 16
__a = inputs['''input_ids''']
__a = [
[49_406, 2_368, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49_406, 6_841, 11_301, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''attention_mask'''])
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length))
self.assertListEqual(list(input_ids[0]) , predicted_ids[0])
self.assertListEqual(list(input_ids[1]) , predicted_ids[1])
    def test_processor_case2(self):
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = OwlViTProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE)
__a = self.prepare_image_inputs()
__a = self.prepare_image_inputs()
__a = processor(images=__SCREAMING_SNAKE_CASE , query_images=__SCREAMING_SNAKE_CASE)
self.assertListEqual(list(inputs.keys()) , ['''query_pixel_values''', '''pixel_values'''])
# test if it raises when no input is passed
with pytest.raises(__SCREAMING_SNAKE_CASE):
processor()
    def test_tokenizer_decode(self):
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = OwlViTProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE)
__a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__a = processor.batch_decode(__SCREAMING_SNAKE_CASE)
__a = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
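
# --- Illustrative usage sketch (added; not part of the original test file). ---
# A typical zero-shot detection preprocessing call; the checkpoint name matches the
# one used in the tests above, the image argument is whatever PIL image you pass in.
def _example_owlvit_processor(image):
    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    return processor(text=[["cat", "nasa badge"]], images=image, return_tensors="pt")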
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}


class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
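
# --- Illustrative usage sketch (added; not part of the original module). ---
# Round-tripping text through the tokenizer; the checkpoint is the one referenced
# in PRETRAINED_VOCAB_FILES_MAP above.
#
#     from transformers import ReformerTokenizer
#
#     tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#     ids = tokenizer.encode("Crime and Punishment")
#     text = tokenizer.decode(ids)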
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention_forwardGenerator_pass = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder
@property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler_kwargs = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**scheduler_kwargs)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
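
# --- Illustrative usage sketch (added; not part of the original test file). ---
# The two-stage Kandinsky img2img flow exercised by the slow test above, reduced to
# its skeleton; checkpoint names match the test, everything else is an assumption.
def _example_kandinsky_img2img(init_image):
    prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
    pipe = KandinskyImg2ImgPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
    image_emb, zero_emb = prior("A red cartoon frog, 4k").to_tuple()
    return pipe(
        "A red cartoon frog, 4k",
        image=init_image,
        image_embeds=image_emb,
        negative_image_embeds=zero_emb,
        strength=0.2,
    ).images[0]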
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : Optional[int] =logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)
    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(src_traced) != len(dest_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
"resnet18": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet26": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
"resnet34": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet50": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
"resnet101": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
"resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
}
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
return config, expected_shape
if __name__ == "__main__":
lowerCamelCase : Union[str, Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
lowerCamelCase : int =parser.parse_args()
lowerCamelCase : Path =args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
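
# --- Illustrative invocation (added; not part of the original script). ---
# Converting a single checkpoint with the defaults (push_to_hub defaults to True);
# the script file name is an assumption, the flags match the argparse definitions above.
#
#     python convert_resnet_to_pytorch.py \
#         --model_name resnet50 \
#         --pytorch_dump_folder_path ./converted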
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
    task: Optional[str] = field(
        default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
    max_source_length: Optional[int] = field(
        default=1024, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    max_target_length: Optional[int] = field(
        default=128, metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    val_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        } , )
    test_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."} )
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."} )
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."} )
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."} )
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."} )
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."} )
    ignore_pad_token_for_loss: bool = field(
        default=True, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def handle_metrics(split, metrics, output_dir):
    """Log metrics for a split and save them to `{split}_results.json`."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args)
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED), training_args.fp16, )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""", _A )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path, from_tf=".ckpt" in model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, )
# use task specific params
    use_task_specific_params(model, data_args.task)
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)
    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
assert_all_frozen(model.get_encoder() )
    dataset_class = Seq2SeqDataset
    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model, args=training_args, data_args=data_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores ), compute_metrics=compute_metrics_fn, tokenizer=tokenizer, )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)
        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)
    if training_args.do_predict:
        logger.info("*** Predict ***")
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))
    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))
return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
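# A minimal sketch of the JSON-config entry path handled in main() above
# (the `sys.argv[1].endswith(".json")` branch). Model name and directories are
# hypothetical placeholders:
#
#     {
#       "model_name_or_path": "sshleifer/student_marian_en_ro_6_1",
#       "data_dir": "wmt_en_ro",
#       "output_dir": "marian_finetuned",
#       "do_train": true, "do_eval": true, "predict_with_generate": true
#     }
#
# invoked as: python finetune_trainer.py config.json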
| 138 |
import os
from pathlib import Path
def load_cuda_kernels():
    """JIT-compile and import the deformable attention CPU/CUDA kernels."""
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]
    load(
        "MultiScaleDeformableAttention", src_files, with_cuda=True, extra_include_paths=[str(root)], extra_cflags=["-DWITH_CUDA=1"], extra_cuda_cflags=[
"""-DCUDA_HAS_FP16=1""",
"""-D__CUDA_NO_HALF_OPERATORS__""",
"""-D__CUDA_NO_HALF_CONVERSIONS__""",
"""-D__CUDA_NO_HALF2_OPERATORS__""",
], )
import MultiScaleDeformableAttention as MSDA
return MSDA
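# A usage sketch, assuming a working CUDA toolchain so the JIT build succeeds.
# The extension exposes the multi-scale deformable attention ops that Deformable
# DETR dispatches to; exact signatures live in the .cpp/.cu sources listed above:
#
#     MSDA = load_cuda_kernels()
#     out = MSDA.ms_deform_attn_forward(value, spatial_shapes, level_start_index,
#                                       sampling_locations, attention_weights, im2col_step)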
| 138 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]
    def __init__(self, **kwargs):
        """Remap deprecated `no_*` kwargs onto their positive counterparts."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}" )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)
    tpu_name: str = field(
        default=None, metadata={"help": "Name of TPU"}, )
    device_idx: int = field(
        default=0, metadata={"help": "CPU / GPU device index. Defaults to 0."}, )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager mode."})
    use_xla: bool = field(
        default=False, metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`."
        }, )
    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy
    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
| 126 |
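# A short usage sketch, assuming the surrounding `transformers` benchmark
# utilities are importable; the model name and sizes are illustrative only:
#
#     args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"],
#                                         batch_sizes=[8], sequence_lengths=[128])
#     print(args.strategy)  # OneDeviceStrategy on CPU/GPU, TPUStrategy with a TPU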
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
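# Quick sanity sketch for the schedule above: betas are positive, capped by
# max_beta, and grow toward the end of the cosine schedule.
#
#     betas = betas_for_alpha_bar(1000)
#     assert betas.shape == (1000,) and betas.min() > 0 and betas.max() <= 0.999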
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """Heun's second-order discrete scheduler (Algorithm 2 in Karras et al. 2022)."""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)
        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None, num_train_timesteps: Optional[int] = None):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." )
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])
        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])
        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)
        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)
        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]
        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1
        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t
    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps: int) -> torch.FloatTensor:
        sigma_min = in_sigmas[-1].item()
        sigma_max = in_sigmas[0].item()
        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
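    # Illustrative sketch of the Karras spacing: interpolating sigma**(1/rho)
    # linearly with rho=7 clusters steps near sigma_min. For example, with
    # sigma_min=0.1, sigma_max=10 and 5 steps the schedule is roughly
    # [10.0, 4.1, 1.5, 0.43, 0.1] rather than evenly spaced.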
    @property
    def state_in_first_order(self):
        return self.dt is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`" )
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2
            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample
            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)
        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)
        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
| 74 | 0 |
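# A minimal usage sketch of the scheduler reconstructed above (values illustrative).
# Heun is a second-order method: after the first-order half step,
# `state_in_first_order` flips and the same timestep is revisited, so each
# inference step costs two model evaluations.
#
#     scheduler = HeunDiscreteScheduler(num_train_timesteps=1000, beta_schedule="linear")
#     scheduler.set_timesteps(num_inference_steps=25, device="cpu")
#     latent = scheduler.scale_model_input(latent, t)   # before each model call
#     latent = scheduler.step(model_output, t, latent).prev_sample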
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset
__snake_case = """bert-base-cased"""
__snake_case = """google/pegasus-xsum"""
__snake_case = [""" Sam ate lunch today.""", """Sams lunch ingredients."""]
__snake_case = ["""A very interesting story about what I ate for lunch.""", """Avocado, celery, turkey, coffee"""]
__snake_case = """patrickvonplaten/t5-tiny-random"""
__snake_case = """sshleifer/bart-tiny-random"""
__snake_case = """sshleifer/tiny-mbart"""
__snake_case = """sshleifer/tiny-marian-en-de"""
def _dump_articles(path: Path, articles: list) -> None:
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
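# Sketch of what the helper above produces (paths illustrative):
#
#     d = make_test_data_dir(tmp_dir="/tmp/s2s")
#     sorted(os.listdir(d))
#     # ['test.source', 'test.target', 'train.source', 'train.target',
#     #  'val.source', 'val.target']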
class TestAll(TestCasePlus):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer, data_dir=tmp_dir, type_path="train", max_source_length=max_src_len, max_target_length=max_tgt_len, src_lang=src_lang, tgt_lang=tgt_lang, )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer, data_dir=tmp_dir, type_path="train", max_source_length=20, max_target_length=trunc_target, )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch
    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")
        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' )
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")
    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)
        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)
        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)
    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(
            tokenizer, data_dir=data_dir, type_path="train", max_source_length=max_len, max_target_length=max_len, n_obs=n_obs, )
        return ds, max_tokens, tokenizer
    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = Seq2SeqDataset(
                tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()), type_path="train", max_source_length=4, max_target_length=8, src_lang="EN", tgt_lang="FR", )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = Seq2SeqDataset(
                tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()), type_path="train", max_source_length=4, max_target_length=8, )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
| 169 |
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch for this alignment, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
__snake_case = """ABAABA"""
__snake_case = """AB"""
__snake_case = BoyerMooreSearch(text, pattern)
__snake_case = bms.bad_character_heuristic()
if len(positions) == 0:
print("""No match found""")
else:
print("""Pattern found in following positions: """)
print(positions)
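# For the demo above, "AB" matches "ABAABA" at indices 0 and 3, so the script
# prints:
#
#     Pattern found in following positions:
#     [0, 3]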
| 169 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
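# A hypothetical invocation sketch (task name and paths are illustrative; the
# valid task names come from the `processors` registry in utils_multiple_choice):
#
#     python run_multiple_choice.py \
#       --task_name swag --model_name_or_path bert-base-cased \
#       --data_dir ./swag_data --output_dir ./swag_out \
#       --do_train --do_eval --max_seq_length 80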
| 325 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config, "use_cache"):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)
                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]
                self.assertEqual(len(outputs), num_out)
                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], )
                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length], )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)
            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
__lowercase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowercase = model(_UpperCAmelCase )[0]
__lowercase = [1, 6, 7_68]
self.assertEqual(output.shape , _UpperCAmelCase )
__lowercase = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 )
| 325 | 1 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase : List[Any] = logging.get_logger()
@dataclass
class Tracker :
    module : nn.Module
    traced : List[nn.Module] = field(default_factory=list )
    handles : list = field(default_factory=list )
    def _forward_hook( self : str , m : Any , inputs : Tensor , outputs : Tensor ):
        '''simple docstring'''
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
def __call__( self : List[Any] , A : Tensor ):
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(A )
[x.remove() for x in self.handles]
return self
@property
    def parametrized( self : Optional[int] ):
        '''simple docstring'''
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
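# (Added note) Tracker registers a forward hook on every submodule, records the
# leaf modules that actually fire during a forward pass, and `parametrized`
# filters that trace down to modules owning weights -- the ordered list that
# ModuleTransfer below zips against the destination model.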
@dataclass
class ModuleTransfer :
    src : nn.Module
    dest : nn.Module
    verbose : int = 1
    src_skip : List = field(default_factory=list )
    dest_skip : List = field(default_factory=list )
    raise_if_mismatch : bool = True
def __call__( self : List[str] , A : Tensor ):
'''simple docstring'''
        dest_traced = Tracker(self.dest )(A ).parametrized
        src_traced = Tracker(self.src )(A ).parametrized
        src_traced = list(filter(lambda x : type(x ) not in self.src_skip , src_traced ) )
        dest_traced = list(filter(lambda x : type(x ) not in self.dest_skip , dest_traced ) )
        if len(dest_traced ) != len(src_traced ) and self.raise_if_mismatch:
            raise Exception(
                F'''Numbers of operations are different. Source module has {len(src_traced )} operations while'''
                F''' destination module has {len(dest_traced )}.''' )
        for dest_m, src_m in zip(dest_traced , src_traced ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'''Transfered from={src_m} to={dest_m}''' )
class FakeRegNetVisslWrapper( nn.Module ):
    def __init__( self : Any , model : nn.Module ):
        '''simple docstring'''
        super().__init__()
        feature_blocks : List[Tuple[str, nn.Module]] = []
# - get the stem
feature_blocks.append(('conv1', model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('block' ), F'''Unexpected layer name {k}'''
            block_index = len(feature_blocks ) + 1
            feature_blocks.append((F'''res{block_index}''', v) )
        self._feature_blocks = nn.ModuleDict(feature_blocks )
    def forward( self : List[str] , x : Tensor ):
        '''simple docstring'''
        return get_trunk_forward_outputs(
            x , out_feat_keys=None , feature_blocks=self._feature_blocks , )
class NameToFromModelFuncMap( dict ):
    def convert_name_to_timm( self : List[Any] , x : str ):
        '''simple docstring'''
        x_split = x.split('-' )
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
    def __getitem__( self : Tuple , x : str ):
        '''simple docstring'''
        if x not in self:
            x = self.convert_name_to_timm(x )
            val = partial(lambda: (timm.create_model(x , pretrained=True ).eval(), None) )
        else:
            val = super().__getitem__(x )
return val
class NameToOurModelFuncMap( dict ):
    def __getitem__( self : Optional[int] , x : str ):
        '''simple docstring'''
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
return val
def manually_copy_vissl_head(from_state_dict :Union[str, Any] , to_state_dict :Dict , keys :List[Tuple[str, str]] ):
    '''simple docstring'''
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f'Copied key={from_key} to={to_key}' )
return to_state_dict
def convert_weight_and_push(name :str , from_model_func :Callable[[], nn.Module] , our_model_func :Callable[[], nn.Module] , config :RegNetConfig , save_directory :Path , push_to_hub :bool = True , ):
    '''simple docstring'''
    print(f'Converting {name}...' )
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model , raise_if_mismatch=False )
        x = torch.randn((1, 3, 2_2_4, 2_2_4) )
        module_transfer(x )
    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')]
        to_state_dict = manually_copy_vissl_head(from_state_dict , our_model.state_dict() , keys )
        our_model.load_state_dict(to_state_dict )
    our_outputs = our_model(x , output_hidden_states=True )
    our_output = (
        our_outputs.logits if isinstance(our_model , RegNetForImageClassification ) else our_outputs.last_hidden_state
    )
    from_output = from_model(x )
    from_output = from_output[-1] if type(from_output ) is list else from_output
    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]
    assert torch.allclose(from_output , our_output ), "The model logits don't match the original one."
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name , commit_message='Add model' , use_temp_dir=True , )
        size = 2_2_4 if 'seer' not in name else 3_8_4
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' , size=size )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name , commit_message='Add image processor' , use_temp_dir=True , )
print(f'''Pushed {name}''' )
def convert_weights_and_push(save_directory :Path , model_name :str = None , push_to_hub :bool = True ):
    '''simple docstring'''
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1_0_0_0
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset' ) ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(RegNetConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_config = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 , layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 1_2] , hidden_sizes=[3_2, 6_4, 1_6_0, 3_8_4] , groups_width=1_6 , layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[4_8, 9_6, 2_4_0, 5_2_8] , groups_width=2_4 , layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[6_4, 1_2_8, 2_8_8, 6_7_2] , groups_width=1_6 , layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 2] , hidden_sizes=[7_2, 1_6_8, 4_0_8, 9_1_2] , groups_width=2_4 , layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 1_5, 2] , hidden_sizes=[9_6, 1_9_2, 4_3_2, 1_0_0_8] , groups_width=4_8 , layer_type='x' ),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 1_4, 2] , hidden_sizes=[8_0, 2_4_0, 5_6_0, 1_3_6_0] , groups_width=4_0 , layer_type='x' ),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 3_9_2, 7_8_4, 1_6_2_4] , groups_width=5_6 , layer_type='x' ),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 1_5, 1] , hidden_sizes=[8_0, 2_4_0, 7_2_0, 1_9_2_0] , groups_width=1_2_0 , layer_type='x' ),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 , layer_type='x' ),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 1_3, 1] , hidden_sizes=[2_5_6, 5_1_2, 8_9_6, 2_0_4_8] , groups_width=1_2_8 , layer_type='x' ),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 1_3, 1] , hidden_sizes=[3_3_6, 6_7_2, 1_3_4_4, 2_5_2_0] , groups_width=1_6_8 , layer_type='x' ),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 ),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[4_8, 1_0_4, 2_0_8, 4_4_0] , groups_width=8 ),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[4_8, 1_1_2, 2_5_6, 6_0_8] , groups_width=1_6 ),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[6_4, 1_2_8, 3_2_0, 7_6_8] , groups_width=1_6 ),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 1_7, 2] , hidden_sizes=[4_8, 1_2_0, 3_3_6, 8_8_8] , groups_width=2_4 ),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 1_3, 1] , hidden_sizes=[7_2, 2_1_6, 5_7_6, 1_5_1_2] , groups_width=2_4 ),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 1_2, 2] , hidden_sizes=[1_2_8, 1_9_2, 5_1_2, 1_0_8_8] , groups_width=6_4 ),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 1_4, 2] , hidden_sizes=[1_4_4, 2_8_8, 5_7_6, 1_2_9_6] , groups_width=7_2 ),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 4_4_8, 8_9_6, 2_0_1_6] , groups_width=5_6 ),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 ),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 1_2_3_2, 3_0_2_4] , groups_width=1_1_2 ),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
# add seer weights logic
    def load_using_classy_vision(checkpoint_url :str , model_func :Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url , model_dir=str(save_directory ) , map_location='cpu' )
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files['classy_state_dict']['base_model']['model']
        state_dict = model_state_dict['trunk']
        model.load_state_dict(state_dict )
return model.eval(), model_state_dict["heads"]
# pretrained
    names_to_from_model_map['regnet-y-320-seer'] = partial(
        load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch' , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
    names_to_from_model_map['regnet-y-640-seer'] = partial(
        load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
    names_to_from_model_map['regnet-y-1280-seer'] = partial(
        load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch' , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
    names_to_from_model_map['regnet-y-10b-seer'] = partial(
        load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch' , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=2_7 , group_width=1_0_1_0 , w_0=1_7_4_4 , w_a=620.83 , w_m=2.52 ) ) ) , )
    # IN1K finetuned
    names_to_from_model_map['regnet-y-320-seer-in1k'] = partial(
        load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
    names_to_from_model_map['regnet-y-640-seer-in1k'] = partial(
        load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
    names_to_from_model_map['regnet-y-1280-seer-in1k'] = partial(
        load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch' , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
    names_to_from_model_map['regnet-y-10b-seer-in1k'] = partial(
        load_using_classy_vision , 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch' , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=2_7 , group_width=1_0_1_0 , w_0=1_7_4_4 , w_a=620.83 , w_m=2.52 ) ) ) , )
    if model_name:
        convert_weight_and_push(
            model_name , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , save_directory , push_to_hub , )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , config , save_directory , push_to_hub , )
return config, expected_shape
if __name__ == "__main__":
_UpperCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported regnet* architecture,'
' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
_UpperCamelCase : Dict = parser.parse_args()
_UpperCamelCase : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 186 |
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1
def pressure_of_gas_system(moles: float , kelvin: float , volume: float ) -> float:
    '''simple docstring'''
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError('Invalid inputs. Enter positive value.' )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system(moles: float , kelvin: float , pressure: float ) -> float:
    '''simple docstring'''
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError('Invalid inputs. Enter positive value.' )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
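# Illustrative check (added; values approximate): one mole of an ideal gas at
# 273.15 K in 0.0224 m^3 gives pressure_of_gas_system(1, 273.15, 0.0224)
# ~= 1.014e5 Pa, i.e. roughly one standard atmosphere.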
| 186 | 1 |
def bead_sort(sequence: list ) -> list:
    if any(not isinstance(x , int ) or x < 0 for x in sequence ):
        raise TypeError("Sequence must be list of non-negative integers" )
    for _ in range(len(sequence ) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence , sequence[1:] ) ):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
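# Complexity note (added): each outer pass lets every "bead" fall by at most one
# rod, so the worst case is O(n^2); negative or non-integer inputs such as
# bead_sort([-1, 2]) raise TypeError by design.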
| 59 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
lowerCAmelCase__ = """\
Text data.
Second line of data."""
lowerCAmelCase__ = """file"""
@pytest.fixture(scope="session" )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[Any] ) -> Optional[int]:
'''simple docstring'''
A__ = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
A__ = bytes(SCREAMING_SNAKE_CASE_ , "utf-8" )
with zstd.open(SCREAMING_SNAKE_CASE_ , "wb" ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture
def tmpfs_file(tmpfs ):
    '''simple docstring'''
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , "w" ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: Any , SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: Any , SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: int ) -> Any:
'''simple docstring'''
A__ = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
A__ = input_paths[compression_format]
A__ = tmp_path / "cache"
A__ = DownloadConfig(cache_dir=SCREAMING_SNAKE_CASE_ , extract_compressed_file=SCREAMING_SNAKE_CASE_ )
A__ = cached_path(SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ ) as f:
A__ = f.read()
with open(SCREAMING_SNAKE_CASE_ ) as f:
A__ = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: List[Any] , SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: Tuple , SCREAMING_SNAKE_CASE_: str ) -> Dict:
'''simple docstring'''
A__ = "custom_cache"
A__ = "custom_extracted_dir"
A__ = tmp_path / "custom_extracted_path"
if default_extracted:
A__ = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , SCREAMING_SNAKE_CASE_ )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(SCREAMING_SNAKE_CASE_ ) )
A__ = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
A__ = xz_file
A__ = (
DownloadConfig(extract_compressed_file=SCREAMING_SNAKE_CASE_ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=SCREAMING_SNAKE_CASE_ )
)
A__ = cached_path(SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ )
assert Path(SCREAMING_SNAKE_CASE_ ).parent.parts[-2:] == expected
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] ) -> Optional[int]:
'''simple docstring'''
A__ = str(Path(SCREAMING_SNAKE_CASE_ ).resolve() )
assert cached_path(SCREAMING_SNAKE_CASE_ ) == text_file
# relative path
A__ = str(Path(SCREAMING_SNAKE_CASE_ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(SCREAMING_SNAKE_CASE_ ) == text_file
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict ) -> List[str]:
'''simple docstring'''
A__ = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
cached_path(SCREAMING_SNAKE_CASE_ )
# relative path
A__ = "./__missing_file__.txt"
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
cached_path(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str ) -> Union[str, Any]:
'''simple docstring'''
A__ = get_from_cache(F'tmp://{tmpfs_file}' )
with open(SCREAMING_SNAKE_CASE_ ) as f:
A__ = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( ) -> List[Any]:
'''simple docstring'''
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] ) -> int:
'''simple docstring'''
A__ = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
http_get("https://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ )
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict ) -> List[Any]:
'''simple docstring'''
A__ = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
ftp_get("ftp://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ )
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Union[str, Any] ) -> str:
'''simple docstring'''
A__ = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
fsspec_get("s3://huggingface.co" , temp_file=SCREAMING_SNAKE_CASE_ )
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
fsspec_head("s3://huggingface.co" )
| 68 | 0 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok , src_examples , tgt_examples , max_tokens=1024 ):
    '''simple docstring'''
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples , tgt_examples ) )
    new_src, new_tgt = sorted_examples[0]
    def is_too_big(strang ):
        return tok(strang , return_tensors="pt" ).input_ids.shape[1] > max_tokens
    for src, tgt in tqdm(sorted_examples[1:] ):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src ) or is_too_big(cand_tgt ):  # cant fit, finalize example
            finished_src.append(new_src )
            finished_tgt.append(new_tgt )
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src )
        finished_tgt.append(new_tgt )
    return finished_src, finished_tgt
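# Usage sketch (added; assumes any Hugging Face tokenizer instance):
#   packed_src, packed_tgt = pack_examples(tokenizer, src_lines, tgt_lines, max_tokens=1024)
# Consecutive example pairs are greedily concatenated until the tokenized source
# or target would exceed max_tokens, which reduces padding waste when
# fine-tuning seq2seq models on many short pairs.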
def pack_data_dir(tok , data_dir: Path , max_tokens , save_path ):
    '''simple docstring'''
    save_path = Path(save_path )
    save_path.mkdir(exist_ok=True )
    for split in ["train"]:
        src_path, tgt_path = data_dir / f'{split}.source', data_dir / f'{split}.target'
        src_docs = [x.rstrip() for x in Path(src_path ).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path ).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok , src_docs , tgt_docs , max_tokens )
        print(f'packed {split} split from {len(src_docs )} examples -> {len(packed_src )}.' )
        Path(save_path / f'{split}.source' ).open('w' ).write('\n'.join(packed_src ) )
        Path(save_path / f'{split}.target' ).open('w' ).write('\n'.join(packed_tgt ) )
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f'{split}.source', data_dir / f'{split}.target'
        shutil.copyfile(src_path , save_path / f'{split}.source' )
        shutil.copyfile(tgt_path , save_path / f'{split}.target' )
def packer_cli():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument('--tok_name' , type=str , help='like facebook/bart-large-cnn,t5-base, etc.' )
    parser.add_argument('--max_seq_len' , type=int , default=128 )
    parser.add_argument('--data_dir' , type=str )
    parser.add_argument('--save_path' , type=str )
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name )
    return pack_data_dir(tokenizer , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 59 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase_ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Optional[int] = ["pixel_values"]
def __init__( self: List[str] ,lowerCamelCase_: bool = True ,lowerCamelCase_: Dict[str, int] = None ,lowerCamelCase_: PILImageResampling = PILImageResampling.BICUBIC ,lowerCamelCase_: bool = True ,lowerCamelCase_: Dict[str, int] = None ,lowerCamelCase_: bool = True ,lowerCamelCase_: Union[int, float] = 1 / 255 ,lowerCamelCase_: bool = True ,lowerCamelCase_: Optional[Union[float, List[float]]] = None ,lowerCamelCase_: Optional[Union[float, List[float]]] = None ,lowerCamelCase_: bool = True ,**lowerCamelCase_: List[Any] ,) -> None:
super().__init__(**lowerCamelCase_ )
UpperCAmelCase_ : int = size if size is not None else {"""shortest_edge""": 224}
UpperCAmelCase_ : Any = get_size_dict(lowerCamelCase_ ,default_to_square=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
UpperCAmelCase_ : List[Any] = get_size_dict(lowerCamelCase_ ,default_to_square=lowerCamelCase_ ,param_name="""crop_size""" )
UpperCAmelCase_ : Union[str, Any] = do_resize
UpperCAmelCase_ : List[Any] = size
UpperCAmelCase_ : Optional[int] = resample
UpperCAmelCase_ : int = do_center_crop
UpperCAmelCase_ : Optional[Any] = crop_size
UpperCAmelCase_ : List[Any] = do_rescale
UpperCAmelCase_ : str = rescale_factor
UpperCAmelCase_ : List[Any] = do_normalize
UpperCAmelCase_ : Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCAmelCase_ : Optional[int] = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCAmelCase_ : List[Any] = do_convert_rgb
def A__ ( self: List[str] ,lowerCamelCase_: np.ndarray ,lowerCamelCase_: Dict[str, int] ,lowerCamelCase_: PILImageResampling = PILImageResampling.BICUBIC ,lowerCamelCase_: Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase_: Dict ,) -> np.ndarray:
UpperCAmelCase_ : Tuple = get_size_dict(lowerCamelCase_ ,default_to_square=lowerCamelCase_ )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
UpperCAmelCase_ : Dict = get_resize_output_image_size(lowerCamelCase_ ,size=size["""shortest_edge"""] ,default_to_square=lowerCamelCase_ )
return resize(lowerCamelCase_ ,size=lowerCamelCase_ ,resample=lowerCamelCase_ ,data_format=lowerCamelCase_ ,**lowerCamelCase_ )
def A__ ( self: List[str] ,lowerCamelCase_: np.ndarray ,lowerCamelCase_: Dict[str, int] ,lowerCamelCase_: Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase_: Optional[Any] ,) -> np.ndarray:
UpperCAmelCase_ : Optional[int] = get_size_dict(lowerCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(lowerCamelCase_ ,size=(size["""height"""], size["""width"""]) ,data_format=lowerCamelCase_ ,**lowerCamelCase_ )
def A__ ( self: Any ,lowerCamelCase_: np.ndarray ,lowerCamelCase_: Union[int, float] ,lowerCamelCase_: Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase_: Optional[int] ,) -> str:
return rescale(lowerCamelCase_ ,scale=lowerCamelCase_ ,data_format=lowerCamelCase_ ,**lowerCamelCase_ )
def A__ ( self: Optional[int] ,lowerCamelCase_: np.ndarray ,lowerCamelCase_: Union[float, List[float]] ,lowerCamelCase_: Union[float, List[float]] ,lowerCamelCase_: Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase_: List[str] ,) -> np.ndarray:
return normalize(lowerCamelCase_ ,mean=lowerCamelCase_ ,std=lowerCamelCase_ ,data_format=lowerCamelCase_ ,**lowerCamelCase_ )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: ImageInput ,lowerCamelCase_: bool = None ,lowerCamelCase_: Dict[str, int] = None ,lowerCamelCase_: PILImageResampling = None ,lowerCamelCase_: bool = None ,lowerCamelCase_: int = None ,lowerCamelCase_: bool = None ,lowerCamelCase_: float = None ,lowerCamelCase_: bool = None ,lowerCamelCase_: Optional[Union[float, List[float]]] = None ,lowerCamelCase_: Optional[Union[float, List[float]]] = None ,lowerCamelCase_: bool = None ,lowerCamelCase_: Optional[Union[str, TensorType]] = None ,lowerCamelCase_: Optional[ChannelDimension] = ChannelDimension.FIRST ,**lowerCamelCase_: Union[str, Any] ,) -> PIL.Image.Image:
UpperCAmelCase_ : str = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : List[str] = size if size is not None else self.size
UpperCAmelCase_ : Dict = get_size_dict(lowerCamelCase_ ,param_name="""size""" ,default_to_square=lowerCamelCase_ )
UpperCAmelCase_ : Dict = resample if resample is not None else self.resample
UpperCAmelCase_ : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : str = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : int = get_size_dict(lowerCamelCase_ ,param_name="""crop_size""" ,default_to_square=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : str = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : List[Any] = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : Union[str, Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCAmelCase_ : Tuple = make_list_of_images(lowerCamelCase_ )
if not valid_images(lowerCamelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCAmelCase_ : List[str] = [convert_to_rgb(lowerCamelCase_ ) for image in images]
# All transformations expect numpy arrays.
UpperCAmelCase_ : str = [to_numpy_array(lowerCamelCase_ ) for image in images]
if do_resize:
UpperCAmelCase_ : List[str] = [self.resize(image=lowerCamelCase_ ,size=lowerCamelCase_ ,resample=lowerCamelCase_ ) for image in images]
if do_center_crop:
UpperCAmelCase_ : Tuple = [self.center_crop(image=lowerCamelCase_ ,size=lowerCamelCase_ ) for image in images]
if do_rescale:
UpperCAmelCase_ : Optional[int] = [self.rescale(image=lowerCamelCase_ ,scale=lowerCamelCase_ ) for image in images]
if do_normalize:
UpperCAmelCase_ : Optional[Any] = [self.normalize(image=lowerCamelCase_ ,mean=lowerCamelCase_ ,std=lowerCamelCase_ ) for image in images]
UpperCAmelCase_ : str = [to_channel_dimension_format(lowerCamelCase_ ,lowerCamelCase_ ) for image in images]
UpperCAmelCase_ : Any = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase_ ,tensor_type=lowerCamelCase_ )
| 59 | 1 |
"""simple docstring"""
def solution() -> str:
    """simple docstring"""
    total = 0
    for i in range(1 , 1001 ):
        total += i**i
    return str(total )[-10:]
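def solution_modular() -> str:
    """Illustrative alternative (added, not part of the original solution):
    keep only the last ten digits with three-argument pow instead of building
    the full ~3000-digit sum."""
    modulus = 10**10
    return str(sum(pow(i , i , modulus ) for i in range(1 , 1001 ) ) % modulus ).zfill(10 )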
if __name__ == "__main__":
print(solution())
| 293 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__A = Lock()
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[Any]:
"""simple docstring"""
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(_SCREAMING_SNAKE_CASE )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
lowerCAmelCase__ :Any = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
lowerCAmelCase__ :Tuple = min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(_SCREAMING_SNAKE_CASE )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
lowerCAmelCase__ :Optional[int] = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
lowerCAmelCase__ :Optional[int] = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# after all swaps are performed, send the values back to main
result_pipe[1].send(_SCREAMING_SNAKE_CASE )
def __A (_SCREAMING_SNAKE_CASE ) ->Optional[int]:
"""simple docstring"""
lowerCAmelCase__ :str = []
lowerCAmelCase__ :Optional[Any] = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
lowerCAmelCase__ :List[str] = Pipe()
lowerCAmelCase__ :List[Any] = Pipe()
process_array_.append(
Process(
target=_SCREAMING_SNAKE_CASE , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
lowerCAmelCase__ :Dict = temp_rs
lowerCAmelCase__ :Optional[Any] = temp_rr
for i in range(1 , len(_SCREAMING_SNAKE_CASE ) - 1 ):
lowerCAmelCase__ :Union[str, Any] = Pipe()
lowerCAmelCase__ :List[str] = Pipe()
process_array_.append(
Process(
target=_SCREAMING_SNAKE_CASE , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
lowerCAmelCase__ :Union[str, Any] = temp_rs
lowerCAmelCase__ :Any = temp_rr
process_array_.append(
Process(
target=_SCREAMING_SNAKE_CASE , args=(
len(_SCREAMING_SNAKE_CASE ) - 1,
arr[len(_SCREAMING_SNAKE_CASE ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(_SCREAMING_SNAKE_CASE ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(_SCREAMING_SNAKE_CASE ) ):
lowerCAmelCase__ :str = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def __A () ->List[Any]:
"""simple docstring"""
lowerCAmelCase__ :Union[str, Any] = list(range(10 , 0 , -1 ) )
print('Initial List' )
print(*_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :List[str] = odd_even_transposition(_SCREAMING_SNAKE_CASE )
print('Sorted List\n' )
print(*_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 293 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = tempfile.mkdtemp()
lowerCAmelCase : Dict = BlipImageProcessor()
lowerCAmelCase : Optional[Any] = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
lowerCAmelCase : Dict = BlipaProcessor(snake_case__ , snake_case__ )
processor.save_pretrained(self.tmpdirname )
def lowercase__ ( self , **snake_case__ ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).tokenizer
def lowercase__ ( self , **snake_case__ ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).image_processor
def lowercase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self ):
"""simple docstring"""
        lowerCAmelCase : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
lowerCAmelCase : Optional[Any] = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase : Optional[int] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCAmelCase : Optional[int] = self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 )
lowerCAmelCase : str = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.get_image_processor()
lowerCAmelCase : int = self.get_tokenizer()
lowerCAmelCase : List[Any] = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowerCAmelCase : Union[str, Any] = self.prepare_image_inputs()
lowerCAmelCase : Union[str, Any] = image_processor(snake_case__ , return_tensors="np" )
lowerCAmelCase : Optional[int] = processor(images=snake_case__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = self.get_image_processor()
lowerCAmelCase : List[str] = self.get_tokenizer()
lowerCAmelCase : Union[str, Any] = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowerCAmelCase : Optional[int] = "lower newer"
lowerCAmelCase : int = processor(text=snake_case__ )
lowerCAmelCase : int = tokenizer(snake_case__ , return_token_type_ids=snake_case__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.get_image_processor()
lowerCAmelCase : Optional[int] = self.get_tokenizer()
lowerCAmelCase : str = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowerCAmelCase : List[str] = "lower newer"
lowerCAmelCase : List[Any] = self.prepare_image_inputs()
lowerCAmelCase : List[str] = processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(snake_case__ ):
processor()
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = self.get_image_processor()
lowerCAmelCase : str = self.get_tokenizer()
lowerCAmelCase : Optional[Any] = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowerCAmelCase : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase : Dict = processor.batch_decode(snake_case__ )
lowerCAmelCase : Union[str, Any] = tokenizer.batch_decode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = self.get_image_processor()
lowerCAmelCase : Dict = self.get_tokenizer()
lowerCAmelCase : List[str] = BlipaProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowerCAmelCase : int = "lower newer"
lowerCAmelCase : List[Any] = self.prepare_image_inputs()
lowerCAmelCase : Any = processor(text=snake_case__ , images=snake_case__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 351 |
"""simple docstring"""
def exchange_sort(numbers: list[int] ) -> list[int]:
    '''simple docstring'''
    numbers_length = len(numbers )
    for i in range(numbers_length ):
        for j in range(i + 1 , numbers_length ):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
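# Note (added): exchange sort always performs n * (n - 1) / 2 comparisons, so it
# is O(n^2) even on already-sorted input; it is included for its simplicity, not
# its speed.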
if __name__ == "__main__":
lowerCAmelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(''',''')]
print(exchange_sort(unsorted))
| 133 | 0 |
"""simple docstring"""
def nand_gate(input_1: int , input_2: int ) -> int:
    return int((input_1, input_2).count(0 ) != 0 )
def test_nand_gate() -> None:
    assert nand_gate(0 , 0 ) == 1
    assert nand_gate(0 , 1 ) == 1
    assert nand_gate(1 , 0 ) == 1
    assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
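# Truth table recap (added): NAND outputs 1 unless both inputs are 1. It is a
# universal gate, e.g. NOT a == nand_gate(a, a).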
| 25 |
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack(value: list[int] , weight: list[int] , capacity: int ) -> tuple[float, list[float]]:
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i : ratio[i] , reverse=True )
    max_value: float = 0
    fractions: list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
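# Worked example (added): value=[60, 100, 120], weight=[10, 20, 30], capacity=50
# gives ratios [6.0, 5.0, 4.0]; items 0 and 1 fit whole, then 20/30 of item 2,
# so fractional_knapsack([60, 100, 120], [10, 20, 30], 50) == (240.0, [1, 1, 2/3]).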
if __name__ == "__main__":
import doctest
doctest.testmod() | 112 | 0 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str , n: int = 1_0_0 , character: str = " " ):
    text = text.split(character )
    return [character.join(text[i : i + n] ).strip() for i in range(0 , len(text ) , n )]
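# Example (added): split_text("the cat sat on the mat", n=3) returns
# ["the cat sat", "on the mat"] -- passages of at most n whitespace-separated words.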
def split_documents(documents: dict ):
    titles, texts = [], []
    for title, text in zip(documents["title"] , documents["text"] ):
        if text is not None:
            for passage in split_text(text ):
                titles.append(title if title is not None else "" )
                texts.append(passage )
    return {"title": titles, "text": texts}
def embed(documents: dict , ctx_encoder: DPRContextEncoder , ctx_tokenizer: DPRContextEncoderTokenizerFast ):
    input_ids = ctx_tokenizer(
        documents["title"] , documents["text"] , truncation=True , padding="longest" , return_tensors="pt" )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device ) , return_dict=True ).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def __lowerCamelCase (UpperCAmelCase__ : "RagExampleArguments" , UpperCAmelCase__ : "ProcessingArguments" , UpperCAmelCase__ : "IndexHnswArguments" , ):
######################################
logger.info("Step 1 - Create the dataset" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
SCREAMING_SNAKE_CASE = load_dataset(
"csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
SCREAMING_SNAKE_CASE = dataset.map(UpperCAmelCase__ , batched=UpperCAmelCase__ , num_proc=processing_args.num_proc )
# And compute the embeddings
SCREAMING_SNAKE_CASE = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
SCREAMING_SNAKE_CASE = Features(
{"text": Value("string" ), "title": Value("string" ), "embeddings": Sequence(Value("float32" ) )} ) # optional, save as float32 instead of float64 to save space
SCREAMING_SNAKE_CASE = dataset.map(
partial(UpperCAmelCase__ , ctx_encoder=UpperCAmelCase__ , ctx_tokenizer=UpperCAmelCase__ ) , batched=UpperCAmelCase__ , batch_size=processing_args.batch_size , features=UpperCAmelCase__ , )
# And finally save your dataset
SCREAMING_SNAKE_CASE = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset" )
dataset.save_to_disk(UpperCAmelCase__ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
SCREAMING_SNAKE_CASE = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("embeddings" , custom_index=UpperCAmelCase__ )
# And save the index
SCREAMING_SNAKE_CASE = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss" )
dataset.get_index("embeddings" ).save(UpperCAmelCase__ )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__ ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
    question: Optional[str] = field(
        default=None , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        } , )
    output_dir: Optional[str] = field(
        default=str(Path(__file__ ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None , metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        } , )
    batch_size: int = field(
        default=16 , metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        } , )
@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
    m: int = field(
        default=128 , metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        } , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 206 | from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model , model_file ):
    try:
        with open(model_file , "rb" ) as flax_state_f:
            flax_state = from_bytes(None , flax_state_f.read() )
    except UnpicklingError as e:
        try:
            with open(model_file ) as f:
                if f.read().startswith("version" ):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned." )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(F"Unable to convert {model_file} to Flax deserializable object. " )
    return load_flax_weights_in_pytorch_model(pt_model , flax_state )
def load_flax_weights_in_pytorch_model(pt_model , flax_state ):
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions." )
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , flax_state ) ).values()
    if any(is_type_bf16 ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model." )
        flax_state = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )
    pt_model.base_model_prefix = ""
    flax_state_dict = flatten_dict(flax_state , sep="." )
    pt_model_dict = pt_model.state_dict()
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split("." )
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            # flax conv kernels are stored HWIO; PyTorch expects OIHW
            flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array ):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0" , ".0" )
                    .replace("_1" , ".1" )
                    .replace("_2" , ".2" )
                    .replace("_3" , ".3" )
                    .replace("_4" , ".4" )
                    .replace("_5" , ".5" )
                    .replace("_6" , ".6" )
                    .replace("_7" , ".7" )
                    .replace("_8" , ".8" )
                    .replace("_9" , ".9" )
                )
        flax_key = ".".join(flax_key_tuple_array )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
                # remove from missing keys
                missing_keys.remove(flax_key )
else:
# weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )
    pt_model.load_state_dict(pt_model_dict )
# re-transform missing_keys to list
    missing_keys = list(missing_keys )
    if len(unexpected_keys ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
    if len(missing_keys ) > 0:
logger.warning(
F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
" use it for predictions and inference." )
return pt_model
| 206 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class lowerCAmelCase_ ( lowerCAmelCase ):
"""simple docstring"""
def snake_case ( self ):
"""simple docstring"""
snake_case = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase , 'num_attention_heads' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase , 'num_encoder_blocks' ) )
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=64 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=[2, 2, 2, 2] , lowerCAmelCase=[8, 4, 2, 1] , lowerCAmelCase=[16, 32, 64, 1_28] , lowerCAmelCase=[1, 4, 8, 16] , lowerCAmelCase=[1, 2, 4, 8] , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=None , ):
"""simple docstring"""
snake_case = parent
snake_case = batch_size
snake_case = image_size
snake_case = num_channels
snake_case = num_encoder_blocks
snake_case = sr_ratios
snake_case = depths
snake_case = hidden_sizes
snake_case = downsampling_rates
snake_case = num_attention_heads
snake_case = is_training
snake_case = use_labels
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = initializer_range
snake_case = num_labels
snake_case = scope
def snake_case ( self ):
"""simple docstring"""
snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case = self.get_config()
return config, pixel_values, labels
def snake_case ( self ):
"""simple docstring"""
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = SegformerModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
snake_case = model(lowerCAmelCase )
snake_case = snake_case = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = self.num_labels
snake_case = SegformerForSemanticSegmentation(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
snake_case = model(lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
snake_case = model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = 1
snake_case = SegformerForSemanticSegmentation(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
snake_case = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(lowerCAmelCase )
snake_case = model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertGreater(result.loss , 0.0 )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.prepare_config_and_inputs()
snake_case ,snake_case ,snake_case = config_and_inputs
snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase : Tuple = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
_lowerCAmelCase : Any = (
{
"""feature-extraction""": SegformerModel,
"""image-classification""": SegformerForImageClassification,
"""image-segmentation""": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_lowerCAmelCase : str = True
_lowerCAmelCase : List[Any] = False
_lowerCAmelCase : Union[str, Any] = False
_lowerCAmelCase : Dict = False
def snake_case ( self ):
"""simple docstring"""
snake_case = SegformerModelTester(self )
snake_case = SegformerConfigTester(self , config_class=lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*lowerCAmelCase )
@unittest.skip('SegFormer does not use inputs_embeds' )
def snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods' )
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
snake_case ,snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = model_class(lowerCAmelCase )
snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case = [*signature.parameters.keys()]
snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case ,snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = True
for model_class in self.all_model_classes:
snake_case = True
snake_case = False
snake_case = True
snake_case = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
snake_case = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
snake_case = outputs.attentions
snake_case = sum(self.model_tester.depths )
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case = True
snake_case = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
snake_case = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
snake_case = outputs.attentions
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
# verify the first attentions (first block, first layer)
snake_case = (self.model_tester.image_size // 4) ** 2
snake_case = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
snake_case = (self.model_tester.image_size // 32) ** 2
snake_case = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
snake_case = len(lowerCAmelCase )
# Check attention is always last and order is fine
snake_case = True
snake_case = True
snake_case = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
snake_case = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
self.assertEqual(out_len + 1 , len(lowerCAmelCase ) )
snake_case = outputs.attentions
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
# verify the first attentions (first block, first layer)
snake_case = (self.model_tester.image_size // 4) ** 2
snake_case = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def snake_case ( self ):
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
snake_case = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
snake_case = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
snake_case = outputs.hidden_states
snake_case = self.model_tester.num_encoder_blocks
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
snake_case ,snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
snake_case ,snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCAmelCase ):
continue
snake_case = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.train()
snake_case = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
snake_case = model(**lowerCAmelCase ).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def snake_case ( self ):
"""simple docstring"""
pass
@slow
def snake_case ( self ):
"""simple docstring"""
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case = SegformerModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def lowerCAmelCase__ ( ) -> str:
"""simple docstring"""
snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=lowerCAmelCase , align=lowerCAmelCase , do_random_crop=lowerCAmelCase )
snake_case = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
lowerCAmelCase )
snake_case = prepare_img()
snake_case = image_processor(images=lowerCAmelCase , return_tensors='pt' )
snake_case = encoded_inputs.pixel_values.to(lowerCAmelCase )
with torch.no_grad():
snake_case = model(lowerCAmelCase )
snake_case = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
snake_case = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowerCAmelCase , atol=1E-4 ) )
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=lowerCAmelCase , align=lowerCAmelCase , do_random_crop=lowerCAmelCase )
snake_case = SegformerForSemanticSegmentation.from_pretrained(
'nvidia/segformer-b1-finetuned-cityscapes-1024-1024' ).to(lowerCAmelCase )
snake_case = prepare_img()
snake_case = image_processor(images=lowerCAmelCase , return_tensors='pt' )
snake_case = encoded_inputs.pixel_values.to(lowerCAmelCase )
with torch.no_grad():
snake_case = model(lowerCAmelCase )
snake_case = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
snake_case = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowerCAmelCase , atol=1E-1 ) )
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=lowerCAmelCase , align=lowerCAmelCase , do_random_crop=lowerCAmelCase )
snake_case = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
lowerCAmelCase )
snake_case = prepare_img()
snake_case = image_processor(images=lowerCAmelCase , return_tensors='pt' )
snake_case = encoded_inputs.pixel_values.to(lowerCAmelCase )
with torch.no_grad():
snake_case = model(lowerCAmelCase )
snake_case = outputs.logits.detach().cpu()
snake_case = image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase , target_sizes=[(5_00, 3_00)] )
snake_case = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , lowerCAmelCase )
snake_case = image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase )
snake_case = torch.Size((1_28, 1_28) )
self.assertEqual(segmentation[0].shape , lowerCAmelCase )
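

# The slow tests above reduce to the following inference recipe. This is a sketch
# of typical usage with the same checkpoint the tests exercise; it assumes network
# access for the pretrained weights and a local image file.
def _segformer_inference_demo(image_path="./tests/fixtures/tests_samples/COCO/000000039769.png"):
    image = Image.open(image_path)
    image_processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
    model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)  # logits at (batch, num_labels, height / 4, width / 4)
    # upsample the logits and take the argmax to get per-pixel class indices
    segmentation = image_processor.post_process_semantic_segmentation(
        outputs=outputs, target_sizes=[image.size[::-1]]
    )[0]
    return segmentation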
| 150 | """simple docstring"""
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """Return both roots of a * x**2 + b * x + c = 0, as floats when the roots are real."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    """Print the roots of 5 * x**2 + 6 * x + 1 = 0."""
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")
if __name__ == "__main__":
main()
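

# Worked example for main()'s coefficients: with a=5, b=6, c=1 the discriminant is
# b * b - 4 * a * c = 36 - 20 = 16, so the roots are (-6 + 4) / 10 = -0.2 and
# (-6 - 4) / 10 = -1.0, i.e. quadratic_roots(a=5, b=6, c=1) == (-0.2, -1.0).
# With a negative discriminant, cmath.sqrt returns an imaginary value and the
# complex roots are reported as-is: quadratic_roots(a=1, b=0, c=1) == (1j, -1j).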
| 150 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase :
def __init__( self : int, a_ : Optional[int], a_ : Optional[Any]=13, a_ : Any=32, a_ : int=3, a_ : Dict=4, a_ : Optional[int]=[10, 20, 30, 40], a_ : Any=[2, 2, 3, 2], a_ : Optional[Any]=True, a_ : Dict=True, a_ : Optional[int]=37, a_ : List[Any]="gelu", a_ : Optional[int]=10, a_ : Dict=0.02, a_ : Dict=["stage2", "stage3", "stage4"], a_ : str=[2, 3, 4], a_ : Any=None, ):
"""simple docstring"""
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = image_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = num_stages
UpperCamelCase__ = hidden_sizes
UpperCamelCase__ = depths
UpperCamelCase__ = is_training
UpperCamelCase__ = use_labels
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = num_labels
UpperCamelCase__ = initializer_range
UpperCamelCase__ = out_features
UpperCamelCase__ = out_indices
UpperCamelCase__ = scope
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size], self.num_labels )
UpperCamelCase__ = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=a_, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
def lowercase_ ( self : Optional[Any], a_ : List[str], a_ : Any, a_ : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = ConvNextModel(config=a_ )
model.to(a_ )
model.eval()
UpperCamelCase__ = model(a_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def lowercase_ ( self : Tuple, a_ : Any, a_ : Optional[Any], a_ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = ConvNextForImageClassification(a_ )
model.to(a_ )
model.eval()
UpperCamelCase__ = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase_ ( self : Optional[int], a_ : Optional[int], a_ : Tuple, a_ : str ):
"""simple docstring"""
UpperCamelCase__ = ConvNextBackbone(config=a_ )
model.to(a_ )
model.eval()
UpperCamelCase__ = model(a_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCamelCase__ = None
UpperCamelCase__ = ConvNextBackbone(config=a_ )
model.to(a_ )
model.eval()
UpperCamelCase__ = model(a_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ), 1 )
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Union[str, Any] = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
_lowerCamelCase : str = (
{'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
if is_torch_available()
else {}
)
_lowerCamelCase : Union[str, Any] = True
_lowerCamelCase : List[Any] = False
_lowerCamelCase : Tuple = False
_lowerCamelCase : List[Any] = False
_lowerCamelCase : List[str] = False
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = ConvNextModelTester(self )
UpperCamelCase__ = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def lowercase_ ( self : str ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase_ ( self : str ):
"""simple docstring"""
return
@unittest.skip(reason="ConvNext does not use inputs_embeds" )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip(reason="ConvNext does not support input and output embeddings" )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip(reason="ConvNext does not use feedforward chunking" )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
pass
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(a_ )
UpperCamelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1], a_ )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a_ )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
def check_hidden_states_output(a_ : Any, a_ : int, a_ : List[Any] ):
UpperCamelCase__ = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
UpperCamelCase__ = model(**self._prepare_for_class(a_, a_ ) )
UpperCamelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase__ = self.model_tester.num_stages
self.assertEqual(len(a_ ), expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = True
check_hidden_states_output(a_, a_, a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ = True
check_hidden_states_output(a_, a_, a_ )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
@slow
def lowercase_ ( self : int ):
"""simple docstring"""
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ = ConvNextModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def SCREAMING_SNAKE_CASE__( ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase):
@cached_property
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224" ) if is_vision_available() else None
@slow
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224" ).to(a_ )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=a_, return_tensors="pt" ).to(a_ )
# forward pass
with torch.no_grad():
UpperCamelCase__ = model(**a_ )
# verify the logits
UpperCamelCase__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape, a_ )
UpperCamelCase__ = torch.tensor([-0.0_260, -0.4_739, 0.1_911] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], a_, atol=1e-4 ) )
@require_torch
class UpperCAmelCase ( unittest.TestCase , SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Any = (ConvNextBackbone,) if is_torch_available() else ()
_lowerCamelCase : str = ConvNextConfig
_lowerCamelCase : Optional[Any] = False
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = ConvNextModelTester(self ) | 31 |
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowercase: str = logging.get_logger(__name__)
__lowercase: Tuple = TypeVar("DatasetType", Dataset, IterableDataset)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[List[float]] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("Unable to interleave an empty list of datasets." )
for i, dataset in enumerate(_UpperCamelCase ):
if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
"is an empty dataset dictionary." )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'
F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' )
if i == 0:
UpperCamelCase__ , UpperCamelCase__ = (
(Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(
F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
else:
return _interleave_iterable_datasets(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
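

# Typical usage of the helper above (a sketch using its public name,
# `interleave_datasets`; assumes two small in-memory datasets):
#
#     from datasets import Dataset, interleave_datasets
#
#     d1 = Dataset.from_dict({"a": [0, 1, 2]})
#     d2 = Dataset.from_dict({"a": [10, 11, 12]})
#     mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)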
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : int = 0 , ) -> DatasetType:
'''simple docstring'''
if not dsets:
raise ValueError("Unable to concatenate an empty list of datasets." )
for i, dataset in enumerate(_UpperCamelCase ):
if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
"is an empty dataset dictionary." )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'
                    F'Please pick one to concatenate with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' )
if i == 0:
UpperCamelCase__ , UpperCamelCase__ = (
(Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(
                F'Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
else:
return _concatenate_iterable_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase ) | 31 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    """Print the first- and second-order Shannon entropies of ``text`` and their difference."""
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """Count single characters and adjacent two-character sequences in ``text``."""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
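

# Quick sanity check of the first-order formula above (H = -sum(p * log2(p)) over
# single-character frequencies): in a text where "a" and "b" are equally likely,
# each term is 0.5 * log2(0.5) = -0.5, so H = 1.0 bit per character. analyze_text
# counts text[-1] once and text[0] .. text[-2] once each, so "abababab" yields
# Counter({"a": 4, "b": 4}) and calculate_prob("abababab") prints 1.0 first.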
| 77 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class A_ :
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ):
return None
class A_ :
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case ):
return None
class A_ ( unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase : Tuple = [
# (model_name, model_kwargs)
("""bert-base-cased""", {}),
("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(snake_case , 'tf' , 12 , **snake_case )
@require_torch
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(snake_case , 'pt' , 12 , **snake_case )
@require_torch
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
from transformers import BertModel
lowercase = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(snake_case ) )
vocab_file.flush()
lowercase = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowercase = BertModel(BertConfig(vocab_size=len(snake_case ) ) )
model.save_pretrained(snake_case )
self._test_export(snake_case , 'pt' , 12 , snake_case )
@require_tf
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase = self._test_export(snake_case , 'tf' , 12 , **snake_case )
lowercase = quantize(Path(snake_case ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(snake_case ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase = self._test_export(snake_case , 'pt' , 12 , **snake_case )
lowercase = quantize(snake_case )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(snake_case ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case=None , **snake_case ):
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowercase = Path(snake_case ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(snake_case , snake_case , snake_case , snake_case , snake_case , **snake_case )
return path
except Exception as e:
self.fail(snake_case )
@require_torch
@require_tokenizers
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
from transformers import BertModel
lowercase = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowercase = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(snake_case , snake_case , 'pt' )
@require_tf
@require_tokenizers
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
from transformers import TFBertModel
lowercase = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowercase = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(snake_case , snake_case , 'tf' )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ):
lowercase = FeatureExtractionPipeline(snake_case , snake_case )
lowercase = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
lowercase , lowercase , lowercase , lowercase = infer_shapes(snake_case , snake_case )
# Assert all variables are present
self.assertEqual(len(snake_case ) , len(snake_case ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , snake_case )
self.assertSequenceEqual(variable_names[3:] , snake_case )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'] , {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'] , {0: 'batch'} )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = ['input_ids', 'attention_mask', 'token_type_ids']
lowercase = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
lowercase , lowercase = ensure_valid_input(FuncContiguousArgs() , snake_case , snake_case )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(snake_case ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(snake_case ) , set(snake_case ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(snake_case , (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowercase , lowercase = ensure_valid_input(FuncNonContiguousArgs() , snake_case , snake_case )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(snake_case ) , 1 )
self.assertEqual(len(snake_case ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['input_ids'] )
self.assertEqual(ordered_input_names[0] , 'input_ids' )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) , '-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx' , generated.as_posix() )
| 195 | 0 |
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
lowerCAmelCase : Tuple = _symbol_database.Default()
lowerCAmelCase : int = _descriptor_pool.Default().AddSerializedFile(
    b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18  \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
lowerCAmelCase : Tuple = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
lowerCAmelCase : Dict = None
lowerCAmelCase : Union[str, Any] = b"""H\003"""
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
lowerCAmelCase : List[Any] = 45
lowerCAmelCase : List[str] = 1581
lowerCAmelCase : List[str] = 1517
lowerCAmelCase : List[Any] = 1570
lowerCAmelCase : List[str] = 1584
lowerCAmelCase : Tuple = 1793
lowerCAmelCase : Union[str, Any] = 1795
lowerCAmelCase : Tuple = 1916
lowerCAmelCase : Tuple = 1864
lowerCAmelCase : Any = 1905
lowerCAmelCase : int = 1919
lowerCAmelCase : Union[str, Any] = 2429
lowerCAmelCase : List[Any] = 2208
lowerCAmelCase : Tuple = 2418
lowerCAmelCase : str = 2323
lowerCAmelCase : List[str] = 2407
# @@protoc_insertion_point(module_scope)
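
# Typical usage of this generated module (a sketch; "tokenizer.model" is a
# placeholder path to a trained SentencePiece model file, and the import path
# assumes the module lives at its usual transformers location):
#
#     from transformers.utils import sentencepiece_model_pb2 as sp_pb2
#
#     m = sp_pb2.ModelProto()
#     with open("tokenizer.model", "rb") as f:
#         m.ParseFromString(f.read())
#     print(m.trainer_spec.model_type, len(m.pieces))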
| 362 |
"""simple docstring"""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "M-CLIP"
def __init__( self , _a=1_024 , _a=768 , **_a ):
"""simple docstring"""
lowerCamelCase = transformerDimSize
lowerCamelCase = imageDimSize
super().__init__(**_a )
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = MCLIPConfig
def __init__( self , _a , *_a , **_a ):
"""simple docstring"""
super().__init__(_a , *_a , **_a )
lowerCamelCase = XLMRobertaModel(_a )
lowerCamelCase = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def _lowerCAmelCase ( self , _a , _a ):
"""simple docstring"""
lowerCamelCase = self.transformer(input_ids=_a , attention_mask=_a )[0]
lowerCamelCase = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(_a ), embs
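
# Sketch of how this text encoder is typically driven (upstream this class is
# M-CLIP's `MultilingualCLIP`; the checkpoint name below is illustrative, and
# `model` stands for a loaded instance of the class above):
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
#     batch = tokenizer(["a photo of a dog"], return_tensors="pt", padding=True)
#     projected, pooled = model(batch["input_ids"], batch["attention_mask"])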
| 168 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_a = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase_ = field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
lowerCAmelCase_ = field(
default=_a , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
lowerCAmelCase_ = field(
default=_a , metadata={"""help""": """The column name of the images in the files."""} )
lowerCAmelCase_ = field(default=_a , metadata={"""help""": """A folder containing the training data."""} )
lowerCAmelCase_ = field(default=_a , metadata={"""help""": """A folder containing the validation data."""} )
lowerCAmelCase_ = field(
default=0.1_5 , metadata={"""help""": """Percent to split off of train for validation."""} )
lowerCAmelCase_ = field(
default=_a , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
lowerCAmelCase_ = field(
default=_a , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = {}
if self.train_dir is not None:
lowerCamelCase__ = self.train_dir
if self.validation_dir is not None:
lowerCamelCase__ = self.validation_dir
lowerCamelCase__ = data_files if data_files else None
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase_ = field(
default=_a , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
lowerCAmelCase_ = field(
default=_a , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} )
lowerCAmelCase_ = field(
default=_a , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
lowerCAmelCase_ = field(
default=_a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
lowerCAmelCase_ = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
lowerCAmelCase_ = field(default=_a , metadata={"""help""": """Name or path of preprocessor config."""} )
lowerCAmelCase_ = field(
default=_a , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
lowerCAmelCase_ = field(
default=0.7_5 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} )
lowerCAmelCase_ = field(
default=_a , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} )
@dataclass
class __A ( _a ):
'''simple docstring'''
lowerCAmelCase_ = field(
default=1e-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
def lowerCAmelCase__(__snake_case ) -> List[str]:
'''simple docstring'''
lowerCamelCase__ = torch.stack([example['''pixel_values'''] for example in examples] )
return {"pixel_values": pixel_values}
def lowerCAmelCase__() -> Dict:
'''simple docstring'''
lowerCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_mae''' ,lowerCamelCase_ ,lowerCamelCase_ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase__ = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase_ )
transformers.utils.logging.set_verbosity(lowerCamelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F', distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
lowerCamelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
lowerCamelCase__ = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,data_files=data_args.data_files ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
# If we don't have a validation split, split off a percentage of train as validation.
lowerCamelCase__ = None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split ,lowerCamelCase_ ) and data_args.train_val_split > 0.0:
lowerCamelCase__ = ds['train'].train_test_split(data_args.train_val_split )
lowerCamelCase__ = split['train']
lowerCamelCase__ = split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase__ = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
lowerCamelCase__ = ViTMAEConfig.from_pretrained(model_args.config_name ,**lowerCamelCase_ )
elif model_args.model_name_or_path:
lowerCamelCase__ = ViTMAEConfig.from_pretrained(model_args.model_name_or_path ,**lowerCamelCase_ )
else:
lowerCamelCase__ = ViTMAEConfig()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(F'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(F'New config: {config}' )
# adapt config
config.update(
{
'''mask_ratio''': model_args.mask_ratio,
'''norm_pix_loss''': model_args.norm_pix_loss,
} )
    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        # the model expects the transformed images under the "pixel_values" key
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
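# A minimal sketch of how a pretraining run might be launched; the dataset name,
# output directory and hyperparameters below are illustrative, not taken from this file:
#
#   python run_mae.py \
#       --dataset_name cifar10 \
#       --output_dir ./vit-mae-demo \
#       --remove_unused_columns False \
#       --base_learning_rate 1.5e-4 \
#       --do_train --do_eval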
| 209 |
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2_000_000) -> int:
    """
    Find the area of the grid whose number of contained rectangles is closest
    to ``target``.
    """
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
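# Why triangle numbers: an a x b grid contains T(a) * T(b) sub-rectangles, where
# T(n) = n * (n + 1) / 2 counts the ways to choose a contiguous run of columns (or rows).
# Sanity check with the 2 x 3 grid from the problem statement, which holds 18 rectangles:
assert (2 * 3 // 2) * (3 * 4 // 2) == 18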
if __name__ == "__main__":
print(F"{solution() = }")
| 21 | 0 |
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """
    An infinite, incremental Sieve of Eratosthenes that yields primes one at a time.
    """
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """
    Return the least odd n for which the remainder 2 * p_n * n exceeds ``limit``.
    """
    prime_generator = sieve()
    n = 1
    while True:
        prime = next(prime_generator)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(prime_generator)
        n += 2
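# Why 2 * prime * n: expanding (p - 1)**n + (p + 1)**n with the binomial theorem,
# every term containing p**2 vanishes modulo p**2; for odd n the constant terms
# cancel and the linear terms sum to 2 * n * p, the remainder the problem asks about.
# Quick check with p = 5, n = 3:
assert (4**3 + 6**3) % 5**2 == (2 * 3 * 5) % 5**2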
if __name__ == "__main__":
print(solution())
| 169 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
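# Example invocation (the dump path below is hypothetical):
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer --checkpoint_name bert-base-uncased \
#       --dump_path ./fast-tokenizers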
| 169 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
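# Note on the slicing above: timm stores the attention input projection as one fused
# qkv matrix of shape (3 * hidden_size, hidden_size); the three equal row blocks are,
# in order, the query, key and value weights, which is why the splits fall at
# hidden_size and 2 * hidden_size.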
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 123 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation using Stable Diffusion, tweaked so that a
    fixed seed produces similar images across different output sizes.
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # set slice_size = `None` to disable attention slicing
        self.enable_attention_slicing(None)
@torch.no_grad()
def __call__( self : Dict , lowerCamelCase : Union[str, List[str]] , lowerCamelCase : int = 512 , lowerCamelCase : int = 512 , lowerCamelCase : int = 50 , lowerCamelCase : float = 7.5 , lowerCamelCase : Optional[Union[str, List[str]]] = None , lowerCamelCase : Optional[int] = 1 , lowerCamelCase : float = 0.0 , lowerCamelCase : Optional[torch.Generator] = None , lowerCamelCase : Optional[torch.FloatTensor] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase : int = 1 , lowerCamelCase : Optional[torch.FloatTensor] = None , **lowerCamelCase : Any , ) -> Optional[Any]:
if isinstance(lowerCamelCase , lowerCamelCase ):
__snake_case : Optional[int] = 1
elif isinstance(lowerCamelCase , lowerCamelCase ):
__snake_case : Tuple = len(lowerCamelCase )
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase )}' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCamelCase , lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(lowerCamelCase )}.' )
# get prompt text embeddings
__snake_case : Tuple = self.tokenizer(
lowerCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
__snake_case : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case : Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
__snake_case : str = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__snake_case : str = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case : int = text_embeddings.shape
__snake_case : Any = text_embeddings.repeat(1 , lowerCamelCase , 1 )
__snake_case : List[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , lowerCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case : List[str] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case : List[str]
if negative_prompt is None:
__snake_case : Any = [""]
elif type(lowerCamelCase ) is not type(lowerCamelCase ):
raise TypeError(
F'`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase )} !='
F' {type(lowerCamelCase )}.' )
elif isinstance(lowerCamelCase , lowerCamelCase ):
__snake_case : int = [negative_prompt]
elif batch_size != len(lowerCamelCase ):
raise ValueError(
F'`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase )}, but `prompt`:'
F' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
" the batch size of `prompt`." )
else:
__snake_case : Tuple = negative_prompt
__snake_case : str = text_input_ids.shape[-1]
__snake_case : Dict = self.tokenizer(
lowerCamelCase , padding="max_length" , max_length=lowerCamelCase , truncation=lowerCamelCase , return_tensors="pt" , )
__snake_case : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case : Tuple = uncond_embeddings.shape[1]
__snake_case : Any = uncond_embeddings.repeat(lowerCamelCase , lowerCamelCase , 1 )
__snake_case : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , lowerCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case : Union[str, Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case : Optional[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__snake_case : Optional[int] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case : Optional[Any] = torch.randn(
lowerCamelCase , generator=lowerCamelCase , device="cpu" , dtype=lowerCamelCase ).to(self.device )
__snake_case : int = torch.randn(lowerCamelCase , generator=lowerCamelCase , device="cpu" , dtype=lowerCamelCase ).to(
self.device )
else:
__snake_case : Union[str, Any] = torch.randn(
lowerCamelCase , generator=lowerCamelCase , device=self.device , dtype=lowerCamelCase )
__snake_case : int = torch.randn(lowerCamelCase , generator=lowerCamelCase , device=self.device , dtype=lowerCamelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
__snake_case : Union[str, Any] = latents_reference.to(self.device )
__snake_case : List[str] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__snake_case : Union[str, Any] = (latents_shape[3] - latents_shape_reference[3]) // 2
__snake_case : Union[str, Any] = (latents_shape[2] - latents_shape_reference[2]) // 2
__snake_case : str = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__snake_case : List[Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__snake_case : Tuple = 0 if dx < 0 else dx
__snake_case : Union[str, Any] = 0 if dy < 0 else dy
__snake_case : Any = max(-dx , 0 )
__snake_case : Optional[int] = max(-dy , 0 )
# import pdb
# pdb.set_trace()
__snake_case : List[Any] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(lowerCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case : Dict = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case : List[str] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case : Optional[Any] = {}
if accepts_eta:
__snake_case : List[Any] = eta
for i, t in enumerate(self.progress_bar(lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
__snake_case : Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case : List[Any] = self.scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
# predict the noise residual
__snake_case : str = self.unet(lowerCamelCase , lowerCamelCase , encoder_hidden_states=lowerCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case : str = noise_pred.chunk(2 )
__snake_case : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case : Optional[Any] = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCamelCase , lowerCamelCase , lowerCamelCase )
__snake_case : List[Any] = 1 / 0.1_82_15 * latents
__snake_case : Dict = self.vae.decode(lowerCamelCase ).sample
__snake_case : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__snake_case : Union[str, Any] = self.feature_extractor(self.numpy_to_pil(lowerCamelCase ) , return_tensors="pt" ).to(
self.device )
__snake_case , __snake_case : str = self.safety_checker(
images=lowerCamelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__snake_case : Dict = None
if output_type == "pil":
__snake_case : Any = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=lowerCamelCase , nsfw_content_detected=lowerCamelCase )
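# A minimal usage sketch, assuming this file is loaded as a diffusers community
# pipeline; the checkpoint id, custom pipeline name and prompt are placeholders:
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", custom_pipeline="seed_resize_stable_diffusion"
#   )
#   image = pipe("an astronaut riding a horse", height=512, width=512).images[0]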
| 123 | 1 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_batch in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows_batch
        return written
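# In practice this writer backs `Dataset.to_sql`; a minimal sketch (table name and
# connection string are hypothetical):
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
#   ds.to_sql("my_table", "sqlite:///my.db", batch_size=1000)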
| 352 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1_002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)
def __lowerCAmelCase ( self : Tuple ) ->Optional[int]:
"""simple docstring"""
a = BertGenerationTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
a = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [285, 46, 10, 170, 382] , )
a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
a = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
a = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
@slow
def __lowerCAmelCase ( self : Any ) ->str:
"""simple docstring"""
a = '''Hello World!'''
a = [18_536, 2_260, 101]
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@slow
def __lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
a = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
a = [
871,
419,
358,
946,
991,
2_521,
452,
358,
1_357,
387,
7_751,
3_536,
112,
985,
456,
126,
865,
938,
5_400,
5_734,
458,
1_368,
467,
786,
2_462,
5_246,
1_159,
633,
865,
4_519,
457,
582,
852,
2_557,
427,
916,
508,
405,
34_324,
497,
391,
408,
11_342,
1_244,
385,
100,
938,
985,
456,
574,
362,
12_597,
3_200,
3_129,
1_172,
]
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@require_torch
@slow
def __lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
a = list(self.big_tokenizer.get_vocab().keys() )[:10]
a = ''' '''.join(__UpperCAmelCase )
a = self.big_tokenizer.encode_plus(__UpperCAmelCase , return_tensors='''pt''' , return_token_type_ids=__UpperCAmelCase )
a = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=__UpperCAmelCase )
a = BertGenerationConfig()
a = BertGenerationEncoder(__UpperCAmelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__UpperCAmelCase )
model(**__UpperCAmelCase )
@slow
def __lowerCAmelCase ( self : str ) ->Optional[Any]:
"""simple docstring"""
a = {'''input_ids''': [[39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114], [448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
| 26 | 0 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: argparse.Namespace):
    """DO NOT CHANGE. This function computes and logs the result metrics."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """DO NOT CHANGE. This function applies the same text normalization used during training."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
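# Example invocation (model and dataset ids are illustrative, not taken from this file):
#   python eval.py --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_8_0 --config en --split test --log_outputs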
| 82 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Return ``n`` together with all of its left- and right-truncations."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """Quick pre-filter: the first and last three digits must themselves be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Return the first ``count`` primes that remain prime under truncation from both sides."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Return the sum of the only eleven truncatable primes."""
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
print(f"{sum(compute_truncated_primes(11)) = }")
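# The eleven truncatable primes are 23, 37, 53, 73, 313, 317, 373, 797, 3137, 3797
# and 739397, so the expected output is their sum, 748317 (Project Euler 37).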
| 82 | 1 |
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        # add a sequence dimension so the transformer mapper sees (batch, 1, hidden)
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        # roughly one transformer block per five encoder layers; for a 24-layer CLIP
        # vision tower, (24 + 1) // 5 yields a 5-block mapper
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states | 239 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass | 239 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]
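# With the _import_structure above, importing this package stays cheap: the _LazyModule
# set up at the bottom of the file only materializes a submodule when one of its
# attributes is first accessed, e.g. `from transformers import YolosForObjectDetection`.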
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 243 |
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # the RVL-CDIP benchmark has 16 document classes
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4)) | 243 | 1 |
def harmonic_series(n_term: str) -> list:
    """Generate the first ``n_term`` terms of the Harmonic Series as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
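# e.g. harmonic_series("3") returns ['1', '1/2', '1/3']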
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
print(harmonic_series(nth_term))
| 208 |
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def camel_case_split(identifier: str) -> list:
    """Split a camel-cased name into its component words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
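

# Added example (editor's sketch): this is how model prefixes get peeled off
# one word at a time inside `get_frameworks_table` below.
def _demo_camel_case_split() -> None:
    assert camel_case_split("TFBertModel") == ["TF", "Bert", "Model"]
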
def get_frameworks_table() -> pd.DataFrame:
    """Generate a dataframe with the framework and processor support of each model type."""
    # Dictionary model names to config.
    config_mapping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_mapping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to pick a default processing class for each model type.
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    """Update the table mapping model class to (pipeline_tag, auto_class) without removing old keys."""
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table
def update_metadata(token, commit_sha):
    """Update the metadata for the Transformers repo on the Hub."""
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    """Check all pipeline tags are present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant."""
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 208 | 1 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Plain linear scan, used once the search window drops below `precision`."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted list; returns an index or -1."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over a sorted list; returns an index or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
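

# Added example (editor's sketch): both searches agree on a sorted list;
# `precision` only controls when the ternary split falls back to the linear scan.
def _demo_ternary_search() -> None:
    data = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]
    assert ite_ternary_search(data, 13) == rec_ternary_search(0, len(data) - 1, data, 13)
    assert ite_ternary_search(data, 4) == -1
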
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCamelCase : List[Any] = input('''Enter numbers separated by comma:\n''').strip()
_lowerCamelCase : str = [int(item.strip()) for item in user_input.split(''',''')]
assert collection == sorted(collection), F"List must be ordered.\n{collection}."
_lowerCamelCase : Union[str, Any] = int(input('''Enter the number to be found in the list:\n''').strip())
_lowerCamelCase : Tuple = ite_ternary_search(collection, target)
_lowerCamelCase : List[Any] = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(F'Iterative search: {target} found at positions: {resulta}')
print(F'Recursive search: {target} found at positions: {resulta}')
else:
print('''Not found''') | 282 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    r"""
    Multiple `ControlNetModel` wrapper class for Multi-ControlNet.
    """

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets) | 282 | 1 |
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate real power from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate reactive power from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
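

# Added worked example (editor's sketch): for 100 VA apparent power at a power
# factor of 0.8, the power triangle gives P = S*pf and Q = S*sqrt(1 - pf**2).
def _demo_power_triangle() -> None:
    assert round(real_power(100, 0.8), 4) == 80.0
    assert round(reactive_power(100, 0.8), 4) == 60.0
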
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350 |
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """Greedy fractional knapsack: values `vl`, weights `wt`, capacity `w`, item count `n`."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
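

# Added worked example (editor's sketch): take items 1 and 2 whole, then
# 20/30 of item 3, for a total value of 60 + 100 + 80 = 240.
def _demo_frac_knapsack() -> None:
    assert frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0
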
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 | 0 |
import argparse
from collections import defaultdict
import yaml
a_ = """docs/source/en/_toctree.yml"""
def clean_model_doc_toc(model_doc):
    """Clean the model documentation table of content: deduplicate entries and sort them alphabetically."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 330 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    """
    Utility class containing a conversation and its history.
    """

    def __init__(self, text: str = None, conversation_id=None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        """Add a user input to the conversation for the next round."""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        """Move the unprocessed user input into the history."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        """Append a model response to the history."""
        self.generated_responses.append(response)

    def iter_texts(self):
        """Iterate over (is_user, text) pairs for the whole conversation."""
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
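

# Added usage sketch (editor's addition): how a pipeline is meant to drive the
# state machine above.
def _demo_conversation_state() -> None:
    conversation = Conversation("Hi there!")
    conversation.mark_processed()           # moves the pending input into the history
    conversation.append_response("Hello!")  # records the bot reply
    assert conversation.past_user_inputs == ["Hi there!"]
    assert conversation.generated_responses == ["Hello!"]
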
@add_end_docstrings(
    PIPELINE_INIT_ARGS, r"""
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
""" , )
class ConversationalPipeline(Pipeline):
    """
    Multi-turn conversational pipeline.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline expects a Conversation as input")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 330 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
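
# Added note (editor's sketch): with the _LazyModule hook above, heavy
# submodules are only imported on first attribute access, e.g.
#   from transformers.models.canine import CanineTokenizer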
| 115 |
from jiwer import compute_measures
import datasets
SCREAMING_SNAKE_CASE_:str = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
SCREAMING_SNAKE_CASE_:Union[str, Any] = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
SCREAMING_SNAKE_CASE_:List[Any] = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Wer(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"""predictions""": datasets.Value("""string""", id="""sequence""" ),
"""references""": datasets.Value("""string""", id="""sequence""" ),
} ), codebase_urls=["""https://github.com/jitsi/jiwer/"""], reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
], )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
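

# Added worked example (editor's sketch): one substituted word out of four
# reference words gives WER = (S + D + I) / (S + D + C) = 1 / 4.
def _demo_wer() -> None:
    measures = compute_measures("this is the reference", "this is the prediction")
    errors = measures["substitutions"] + measures["deletions"] + measures["insertions"]
    total = measures["substitutions"] + measures["deletions"] + measures["hits"]
    assert errors / total == 0.25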
| 115 | 1 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
'sep': self.sep,
'header': self.header,
'names': self.names,
'index_col': self.index_col,
'usecols': self.usecols,
'prefix': self.prefix,
'mangle_dupe_cols': self.mangle_dupe_cols,
'engine': self.engine,
'converters': self.converters,
'true_values': self.true_values,
'false_values': self.false_values,
'skipinitialspace': self.skipinitialspace,
'skiprows': self.skiprows,
'nrows': self.nrows,
'na_values': self.na_values,
'keep_default_na': self.keep_default_na,
'na_filter': self.na_filter,
'verbose': self.verbose,
'skip_blank_lines': self.skip_blank_lines,
'thousands': self.thousands,
'decimal': self.decimal,
'lineterminator': self.lineterminator,
'quotechar': self.quotechar,
'quoting': self.quoting,
'escapechar': self.escapechar,
'comment': self.comment,
'encoding': self.encoding,
'dialect': self.dialect,
'error_bad_lines': self.error_bad_lines,
'warn_bad_lines': self.warn_bad_lines,
'skipfooter': self.skipfooter,
'doublequote': self.doublequote,
'memory_map': self.memory_map,
'float_precision': self.float_precision,
'chunksize': self.chunksize,
'encoding_errors': self.encoding_errors,
'on_bad_lines': self.on_bad_lines,
'date_format': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
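

# Added usage sketch (editor's addition, hypothetical file names): this builder
# is what backs `load_dataset("csv", ...)`, e.g.
#   from datasets import load_dataset
#   dataset = load_dataset("csv", data_files={"train": "train.csv"})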
| 141 |
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
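

# Added example (editor's sketch): the conv feature extractor downsamples raw
# audio by the product of the strides, so one output frame covers
# 5 * 2**6 = 320 input samples with the defaults above.
def _demo_inputs_to_logits_ratio() -> None:
    assert Data2VecAudioConfig().inputs_to_logits_ratio == 320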
| 141 | 1 |
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Apply the logistic sigmoid element-wise."""
    return 1 / (1 + np.exp(-vector))


def swish(vector: np.ndarray) -> np.ndarray:
    """Swish / SiLU activation: x * sigmoid(x)."""
    return vector * sigmoid(vector)
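

# Added check (editor's sketch; the name `swish` is the editor's choice for
# the second helper): swish(x) == x * sigmoid(x) element-wise.
def _demo_swish() -> None:
    x = np.array([-1.0, 0.0, 1.0])
    np.testing.assert_allclose(swish(x), x / (1 + np.exp(-x)))
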
if __name__ == "__main__":
import doctest
doctest.testmod()
| 157 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
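

# Added example (editor's sketch): a (2, 5) batch of uniformly random token ids
# in [0, vocab_size), as consumed by the prepare functions below.
#   random_input_ids(batch_size=2, sequence_length=5, vocab_size=100)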
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward

        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train

        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 157 | 1 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""huggingface/time-series-transformer-tourism-monthly""": (
"""https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"""
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
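

# Added example (editor's sketch): with the defaults, the per-step feature
# vector holds input_size * len(lags_sequence) lagged values plus the extra
# features counted by `_number_of_features`.
def _demo_feature_size() -> None:
    config = TimeSeriesTransformerConfig(prediction_length=24)
    assert config.feature_size == config.input_size * len(config.lags_sequence) + config._number_of_features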
| 303 |
import math
import os
import sys
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = ''''''
try:
with open(snake_case , '''rb''' ) as binary_file:
__SCREAMING_SNAKE_CASE : int = binary_file.read()
for dat in data:
__SCREAMING_SNAKE_CASE : Optional[Any] = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('''File not accessible''' )
sys.exit()
def a__ ( snake_case , snake_case , snake_case , snake_case ):
"""simple docstring"""
lexicon.pop(snake_case )
__SCREAMING_SNAKE_CASE : List[str] = last_match_id
    if math.log2(snake_case ).is_integer():
for curr_key in lexicon:
__SCREAMING_SNAKE_CASE : int = '''0''' + lexicon[curr_key]
__SCREAMING_SNAKE_CASE : List[str] = bin(snake_case )[2:]
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = {'''0''': '''0''', '''1''': '''1'''}
    __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE = '''''', ''''''
__SCREAMING_SNAKE_CASE : Optional[Any] = len(snake_case )
for i in range(len(snake_case ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__SCREAMING_SNAKE_CASE : Any = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(snake_case , snake_case , snake_case , snake_case )
index += 1
__SCREAMING_SNAKE_CASE : Tuple = ''''''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
__SCREAMING_SNAKE_CASE : Dict = lexicon[curr_string]
result += last_match_id
return result
def a__ ( snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = os.path.getsize(snake_case )
__SCREAMING_SNAKE_CASE : Union[str, Any] = bin(snake_case )[2:]
__SCREAMING_SNAKE_CASE : int = len(snake_case )
return "0" * (length_length - 1) + file_length_binary + compressed
def a__ ( snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = 8
try:
with open(snake_case , '''wb''' ) as opened_file:
__SCREAMING_SNAKE_CASE : Optional[int] = [
to_write[i : i + byte_length]
for i in range(0 , len(snake_case ) , snake_case )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('''10000000''' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(snake_case , 2 ).to_bytes(1 , byteorder='''big''' ) )
except OSError:
print('''File not accessible''' )
sys.exit()
def a__ ( snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = read_file_binary(snake_case )
__SCREAMING_SNAKE_CASE : Optional[int] = compress_data(snake_case )
__SCREAMING_SNAKE_CASE : Dict = add_file_length(snake_case , snake_case )
write_file_binary(snake_case , snake_case )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 303 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ : int = logging.get_logger(__name__)
lowerCAmelCase_ : int = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCamelCase_ ( a_ ):
_A : Dict = 'yolos'
def __init__( self , snake_case__=7_68 , snake_case__=12 , snake_case__=12 , snake_case__=30_72 , snake_case__="gelu" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=[5_12, 8_64] , snake_case__=16 , snake_case__=3 , snake_case__=True , snake_case__=1_00 , snake_case__=True , snake_case__=False , snake_case__=1 , snake_case__=5 , snake_case__=2 , snake_case__=5 , snake_case__=2 , snake_case__=0.1 , **snake_case__ , ) -> Dict:
"""simple docstring"""
super().__init__(**snake_case__ )
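        # ViT-style backbone hyperparameters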
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = qkv_bias
UpperCAmelCase = num_detection_tokens
UpperCAmelCase = use_mid_position_embeddings
UpperCAmelCase = auxiliary_loss
# Hungarian matcher
UpperCAmelCase = class_cost
UpperCAmelCase = bbox_cost
UpperCAmelCase = giou_cost
# Loss coefficients
UpperCAmelCase = bbox_loss_coefficient
UpperCAmelCase = giou_loss_coefficient
UpperCAmelCase = eos_coefficient
class UpperCamelCase_ ( a_ ):
_A : Any = version.parse('1.11' )
@property
def UpperCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCamelCase_ ( self ) -> float:
"""simple docstring"""
return 1e-4
@property
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
return 12
| 248 |
"""simple docstring"""
from __future__ import annotations
import math
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
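    # build every number obtained by truncating digits of n from the left and from the right, including n itself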
UpperCAmelCase = str(lowerCAmelCase )
UpperCAmelCase = [n]
for i in range(1 , len(lowerCAmelCase ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
if len(str(lowerCAmelCase ) ) > 3:
if not is_prime(int(str(lowerCAmelCase )[-3:] ) ) or not is_prime(int(str(lowerCAmelCase )[:3] ) ):
return False
return True
def _lowerCAmelCase ( lowerCAmelCase = 11 ):
'''simple docstring'''
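    # scan odd candidates starting at 13 until the requested number of truncatable primes is collected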
UpperCAmelCase = []
UpperCAmelCase = 13
while len(lowerCAmelCase ) != count:
if validate(lowerCAmelCase ):
UpperCAmelCase = list_truncated_nums(lowerCAmelCase )
if all(is_prime(lowerCAmelCase ) for i in list_nums ):
list_truncated_primes.append(lowerCAmelCase )
num += 2
return list_truncated_primes
def _lowerCAmelCase ( ):
'''simple docstring'''
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(F'{sum(compute_truncated_primes(1_1)) = }')
| 248 | 1 |
"""simple docstring"""
class __A :
'''simple docstring'''
def __init__( self : Tuple ,_snake_case : int ,_snake_case : Union[str, Any]=None ,_snake_case : List[Any]=None ) -> Tuple:
"""simple docstring"""
lowercase__ : List[Any] = data
lowercase__ : Tuple = previous
lowercase__ : Tuple = next_node
def __str__( self : Optional[Any] ) -> str:
"""simple docstring"""
return f"""{self.data}"""
def UpperCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
return self.data
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
return self.next
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return self.previous
class __A :
'''simple docstring'''
def __init__( self : Optional[Any] ,_snake_case : Any ) -> int:
"""simple docstring"""
lowercase__ : Union[str, Any] = head
def __iter__( self : Union[str, Any] ) -> Any:
"""simple docstring"""
return self
def UpperCAmelCase ( self : str ) -> Any:
"""simple docstring"""
if not self.current:
raise StopIteration
else:
lowercase__ : str = self.current.get_data()
lowercase__ : Optional[Any] = self.current.get_next()
return value
class __A :
'''simple docstring'''
def __init__( self : List[Any] ) -> Dict:
"""simple docstring"""
lowercase__ : Tuple = None # First node in list
lowercase__ : int = None # Last node in list
def __str__( self : Dict ) -> Optional[int]:
"""simple docstring"""
lowercase__ : List[str] = self.head
lowercase__ : Dict = []
while current is not None:
nodes.append(current.get_data() )
lowercase__ : Dict = current.get_next()
return " ".join(str(_snake_case ) for node in nodes )
def __contains__( self : Any ,_snake_case : int ) -> Any:
"""simple docstring"""
lowercase__ : List[str] = self.head
while current:
if current.get_data() == value:
return True
lowercase__ : Tuple = current.get_next()
return False
def __iter__( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return LinkedListIterator(self.head )
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
if self.head:
return self.head.get_data()
return None
def UpperCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
if self.tail:
return self.tail.get_data()
return None
def UpperCAmelCase ( self : List[Any] ,_snake_case : Node ) -> None:
"""simple docstring"""
if self.head is None:
lowercase__ : Dict = node
lowercase__ : Dict = node
else:
self.insert_before_node(self.head ,_snake_case )
def UpperCAmelCase ( self : List[Any] ,_snake_case : Node ) -> None:
"""simple docstring"""
if self.head is None:
self.set_head(_snake_case )
else:
self.insert_after_node(self.tail ,_snake_case )
def UpperCAmelCase ( self : int ,_snake_case : int ) -> None:
"""simple docstring"""
lowercase__ : Any = Node(_snake_case )
if self.head is None:
self.set_head(_snake_case )
else:
self.set_tail(_snake_case )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Node ,_snake_case : Node ) -> None:
"""simple docstring"""
lowercase__ : Tuple = node
lowercase__ : Optional[Any] = node.previous
if node.get_previous() is None:
lowercase__ : Optional[int] = node_to_insert
else:
lowercase__ : List[str] = node_to_insert
lowercase__ : Tuple = node_to_insert
def UpperCAmelCase ( self : int ,_snake_case : Node ,_snake_case : Node ) -> None:
"""simple docstring"""
lowercase__ : Dict = node
lowercase__ : str = node.next
if node.get_next() is None:
lowercase__ : int = node_to_insert
else:
lowercase__ : Optional[int] = node_to_insert
lowercase__ : Tuple = node_to_insert
def UpperCAmelCase ( self : Optional[int] ,_snake_case : int ,_snake_case : int ) -> None:
"""simple docstring"""
lowercase__ : Dict = 1
lowercase__ : Optional[Any] = Node(_snake_case )
lowercase__ : Any = self.head
while node:
if current_position == position:
self.insert_before_node(_snake_case ,_snake_case )
return
current_position += 1
lowercase__ : Optional[Any] = node.next
self.insert_after_node(self.tail ,_snake_case )
def UpperCAmelCase ( self : int ,_snake_case : int ) -> Node:
"""simple docstring"""
lowercase__ : Any = self.head
while node:
if node.get_data() == item:
return node
lowercase__ : Tuple = node.get_next()
raise Exception('''Node not found''' )
def UpperCAmelCase ( self : Dict ,_snake_case : List[Any] ) -> str:
"""simple docstring"""
if (node := self.get_node(_snake_case )) is not None:
if node == self.head:
lowercase__ : List[Any] = self.head.get_next()
if node == self.tail:
lowercase__ : List[Any] = self.tail.get_previous()
self.remove_node_pointers(_snake_case )
@staticmethod
def UpperCAmelCase ( _snake_case : Node ) -> None:
"""simple docstring"""
if node.get_next():
lowercase__ : int = node.previous
if node.get_previous():
lowercase__ : Optional[int] = node.next
lowercase__ : List[Any] = None
lowercase__ : Tuple = None
def UpperCAmelCase ( self : str ) -> str:
"""simple docstring"""
return self.head is None
def __UpperCAmelCase ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 |
"""simple docstring"""
import os
def __UpperCAmelCase ( ) -> int:
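    # Project Euler 22: each name scores the sum of its letters' alphabet positions times its 1-based rank in the sorted list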
with open(os.path.dirname(__lowerCamelCase ) + '''/p022_names.txt''' ) as file:
lowercase__ : List[Any] = str(file.readlines()[0] )
lowercase__ : Dict = names.replace('''"''' , '''''' ).split(''',''' )
names.sort()
lowercase__ : int = 0
lowercase__ : Optional[Any] = 0
for i, name in enumerate(__lowerCamelCase ):
for letter in name:
name_score += ord(__lowerCamelCase ) - 64
total_score += (i + 1) * name_score
        lowercase__ : List[str] = 0  # reset the per-name score before the next name
return total_score
if __name__ == "__main__":
print(solution())
| 16 | 1 |
"""simple docstring"""
import random
from typing import Any
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
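    # shuffle in place by swapping two randomly chosen positions once per element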
for _ in range(len(lowerCAmelCase ) ):
UpperCAmelCase = random.randint(0 , len(lowerCAmelCase ) - 1 )
UpperCAmelCase = random.randint(0 , len(lowerCAmelCase ) - 1 )
UpperCAmelCase , UpperCAmelCase = data[b], data[a]
return data
if __name__ == "__main__":
lowerCAmelCase_ : Tuple = [0, 1, 2, 3, 4, 5, 6, 7]
lowerCAmelCase_ : Union[str, Any] = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 248 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
lowerCAmelCase_ : str = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
lowerCAmelCase_ : Any = (
subprocess.check_output(F'git diff --diff-filter=d --name-only {fork_point_sha}'.split()).decode('''utf-8''').split()
)
lowerCAmelCase_ : Optional[int] = '''|'''.join(sys.argv[1:])
lowerCAmelCase_ : Union[str, Any] = re.compile(RF'^({joined_dirs}).*?\.py$')
lowerCAmelCase_ : int = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''')
| 248 | 1 |
"""simple docstring"""
import cva
import numpy as np
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Tuple, UpperCAmelCase__ : float, UpperCAmelCase__ : int ):
if k in (0.04, 0.06):
__lowercase = k
__lowercase = window_size
else:
raise ValueError("invalid k value" )
def __str__( self : Any ):
return str(self.k )
def _lowercase ( self : Optional[int], UpperCAmelCase__ : str ):
__lowercase = cva.imread(UpperCAmelCase__, 0 )
__lowercase ,__lowercase = img.shape
__lowercase = []
__lowercase = img.copy()
__lowercase = cva.cvtColor(UpperCAmelCase__, cva.COLOR_GRAY2RGB )
__lowercase ,__lowercase = np.gradient(UpperCAmelCase__ )
__lowercase = dx**2
__lowercase = dy**2
__lowercase = dx * dy
__lowercase = 0.04
__lowercase = self.window_size // 2
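        # slide a window over the image and compute the Harris response r = det(M) - k * trace(M)^2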
for y in range(UpperCAmelCase__, h - offset ):
for x in range(UpperCAmelCase__, w - offset ):
__lowercase = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__lowercase = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__lowercase = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__lowercase = (wxx * wyy) - (wxy**2)
__lowercase = wxx + wyy
__lowercase = det - k * (trace**2)
                # corner response threshold; can be tuned
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0), 0 )
color_img.itemset((y, x, 1), 0 )
color_img.itemset((y, x, 2), 2_5_5 )
return color_img, corner_list
if __name__ == "__main__":
_a = HarrisCorner(0.04, 3)
_a , _a = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img)
| 17 |
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
SCREAMING_SNAKE_CASE__ = importlib.util.find_spec('s3fs') is not None
if _has_safs:
    from .s3filesystem import S3FileSystem  # noqa: F401
SCREAMING_SNAKE_CASE__ = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def lowercase__ ( __UpperCamelCase )-> str:
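    # strip an fsspec protocol prefix such as "s3://" from the dataset path, if present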
if "://" in dataset_path:
UpperCamelCase = dataset_path.split("""://""" )[1]
return dataset_path
def lowercase__ ( __UpperCamelCase )-> bool:
if fs is not None and fs.protocol != "file":
return True
else:
return False
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> int:
UpperCamelCase = not is_remote_filesystem(__UpperCamelCase )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(__UpperCamelCase ) , fs._strip_protocol(__UpperCamelCase ) )
else:
fs.mv(__UpperCamelCase , __UpperCamelCase , recursive=__UpperCamelCase )
def lowercase__ ( )-> None:
if hasattr(fsspec.asyn , """reset_lock""" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = threading.Lock()
| 321 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__snake_case : int = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Dict = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : str = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    __snake_case : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 58 |
"""simple docstring"""
from math import pi
def _lowercase ( __snake_case ,__snake_case ) -> float:
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
    print(arc_length(90, 10))
| 58 | 1 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class __snake_case :
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=99 , __UpperCamelCase=32 , __UpperCamelCase=2 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=512 , __UpperCamelCase=16 , __UpperCamelCase=2 , __UpperCamelCase=0.0_2 , __UpperCamelCase=3 , __UpperCamelCase=4 , __UpperCamelCase=None , ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[int] = parent
snake_case__ : Optional[Any] = 13
snake_case__ : Any = 7
snake_case__ : Optional[int] = True
snake_case__ : Union[str, Any] = True
snake_case__ : Union[str, Any] = True
snake_case__ : Optional[Any] = True
snake_case__ : Optional[int] = 99
snake_case__ : List[str] = 32
snake_case__ : Tuple = 2
snake_case__ : List[Any] = 4
snake_case__ : List[str] = 37
snake_case__ : str = 'gelu'
snake_case__ : Union[str, Any] = 0.1
snake_case__ : int = 0.1
snake_case__ : List[Any] = 512
snake_case__ : str = 16
snake_case__ : Optional[Any] = 2
snake_case__ : Dict = 0.0_2
snake_case__ : Dict = 3
snake_case__ : int = 4
snake_case__ : Any = None
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : List[Any] = None
if self.use_input_mask:
snake_case__ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : Optional[Any] = None
if self.use_token_type_ids:
snake_case__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case__ : Tuple = None
snake_case__ : Union[str, Any] = None
snake_case__ : Union[str, Any] = None
if self.use_labels:
snake_case__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
snake_case__ : Tuple = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__UpperCamelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Tuple:
'''simple docstring'''
snake_case__ : Union[str, Any] = TFRoFormerModel(config=__UpperCamelCase )
snake_case__ : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
snake_case__ : Dict = [input_ids, input_mask]
snake_case__ : Any = model(__UpperCamelCase )
snake_case__ : Optional[int] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> str:
'''simple docstring'''
snake_case__ : int = True
snake_case__ : Optional[Any] = TFRoFormerForCausalLM(config=__UpperCamelCase )
snake_case__ : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
snake_case__ : Union[str, Any] = model(__UpperCamelCase )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> str:
'''simple docstring'''
snake_case__ : Optional[int] = TFRoFormerForMaskedLM(config=__UpperCamelCase )
snake_case__ : Optional[int] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
snake_case__ : str = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Dict:
'''simple docstring'''
snake_case__ : List[str] = self.num_labels
snake_case__ : int = TFRoFormerForSequenceClassification(config=__UpperCamelCase )
snake_case__ : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
snake_case__ : Optional[int] = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
'''simple docstring'''
snake_case__ : Optional[int] = self.num_choices
snake_case__ : Dict = TFRoFormerForMultipleChoice(config=__UpperCamelCase )
snake_case__ : List[Any] = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
snake_case__ : Optional[int] = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
snake_case__ : Union[str, Any] = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
snake_case__ : Optional[Any] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
snake_case__ : Union[str, Any] = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : List[str] = self.num_labels
snake_case__ : str = TFRoFormerForTokenClassification(config=__UpperCamelCase )
snake_case__ : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
snake_case__ : str = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[str]:
'''simple docstring'''
snake_case__ : int = TFRoFormerForQuestionAnswering(config=__UpperCamelCase )
snake_case__ : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
snake_case__ : Optional[int] = model(__UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Any = self.prepare_config_and_inputs()
        (
            snake_case__,
            snake_case__,
            snake_case__,
            snake_case__,
            snake_case__,
            snake_case__,
            snake_case__,
        ) = config_and_inputs
snake_case__ : Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
__lowerCamelCase = (
{
"""feature-extraction""": TFRoFormerModel,
"""fill-mask""": TFRoFormerForMaskedLM,
"""question-answering""": TFRoFormerForQuestionAnswering,
"""text-classification""": TFRoFormerForSequenceClassification,
"""text-generation""": TFRoFormerForCausalLM,
"""token-classification""": TFRoFormerForTokenClassification,
"""zero-shot""": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
__lowerCamelCase = False
__lowerCamelCase = False
def __a ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Tuple:
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Any = TFRoFormerModelTester(self )
snake_case__ : int = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 )
def __a ( self ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase )
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*__UpperCamelCase )
def __a ( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCamelCase )
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCamelCase )
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCamelCase )
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase )
@slow
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : int = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
self.assertIsNotNone(__UpperCamelCase )
@require_tf
class __snake_case ( unittest.TestCase ):
@slow
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : str = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
snake_case__ : str = tf.constant([[0, 1, 2, 3, 4, 5]] )
snake_case__ : List[str] = model(__UpperCamelCase )[0]
# TODO Replace vocab size
snake_case__ : str = 50000
snake_case__ : Dict = [1, 6, vocab_size]
self.assertEqual(output.shape , __UpperCamelCase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
snake_case__ : Any = tf.constant(
[
[
[-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6],
[-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7],
[-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __UpperCamelCase , atol=1E-4 )
@require_tf
class __snake_case ( unittest.TestCase ):
__lowerCamelCase = 1E-4
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : List[str] = tf.constant([[4, 10]] )
snake_case__ : Optional[int] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
snake_case__ : List[str] = emba(input_ids.shape )
snake_case__ : Union[str, Any] = tf.constant(
[[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0], [0.8_4_1_5, 0.0_4_6_4, 0.0_0_2_2, 0.5_4_0_3, 0.9_9_8_9, 1.0_0_0_0]] )
tf.debugging.assert_near(__UpperCamelCase , __UpperCamelCase , atol=self.tolerance )
def __a ( self ) -> str:
'''simple docstring'''
snake_case__ : Any = tf.constant(
[
[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0],
[0.8_4_1_5, 0.8_2_1_9, 0.8_0_2_0, 0.7_8_1_9, 0.7_6_1_7],
[0.9_0_9_3, 0.9_3_6_4, 0.9_5_8_1, 0.9_7_4_9, 0.9_8_7_0],
] )
snake_case__ : str = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
snake_case__ : Optional[int] = emba.weight[:3, :5]
tf.debugging.assert_near(__UpperCamelCase , __UpperCamelCase , atol=self.tolerance )
@require_tf
class __snake_case ( unittest.TestCase ):
__lowerCamelCase = 1E-4
def __a ( self ) -> int:
'''simple docstring'''
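        # apply rotary position embeddings to synthetic query/key tensors and compare slices against precomputed references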
        snake_case__ : Tuple = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        snake_case__ : Tuple = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
snake_case__ : Union[str, Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
snake_case__ : Optional[int] = embed_positions([2, 16, 768] )[None, None, :, :]
        snake_case__ , snake_case__ = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case__ : Tuple = tf.constant(
[
[0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0],
[-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3],
[-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5],
[-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1],
[0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0],
[3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3],
] )
snake_case__ : List[str] = tf.constant(
[
[0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0],
[0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3],
[1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5],
[2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1],
[-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0],
[-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __UpperCamelCase , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __UpperCamelCase , atol=self.tolerance )
| 143 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase__ : Dict = {
'''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Union[str, Any] = ['''VivitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : int = [
'''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VivitModel''',
'''VivitPreTrainedModel''',
'''VivitForVideoClassification''',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 143 | 1 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
A = filter(lambda lowercase__ : p.requires_grad , model.parameters() )
A = sum([np.prod(p.size() ) for p in model_parameters] )
return params
__A : Union[str, Any] = logging.getLogger(__name__)
def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ ):
"""simple docstring"""
if metric == "rouge2":
A = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
A = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
A = "{val_avg_em:.4f}-{step_count}"
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
" function." )
A = ModelCheckpoint(
dirpath=lowercase__ , filename=lowercase__ , monitor=F"""val_{metric}""" , mode="max" , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ ):
"""simple docstring"""
return EarlyStopping(
monitor=F"""val_{metric}""" , mode="min" if "loss" in metric else "max" , patience=lowercase__ , verbose=lowercase__ , )
class __UpperCamelCase ( pl.Callback ):
def SCREAMING_SNAKE_CASE__ (self : Optional[int] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Any]):
A = {F"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
pl_module.logger.log_metrics(__SCREAMING_SNAKE_CASE)
@rank_zero_only
def SCREAMING_SNAKE_CASE__ (self : Optional[Any] , __SCREAMING_SNAKE_CASE : pl.Trainer , __SCREAMING_SNAKE_CASE : pl.LightningModule , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any]=True):
logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""")
A = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
# Log results
A = Path(pl_module.hparams.output_dir)
if type_path == "test":
A = od / "test_results.txt"
A = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
A = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
A = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=__SCREAMING_SNAKE_CASE)
generations_file.parent.mkdir(exist_ok=__SCREAMING_SNAKE_CASE)
with open(__SCREAMING_SNAKE_CASE , "a+") as writer:
for key in sorted(__SCREAMING_SNAKE_CASE):
if key in ["log", "progress_bar", "preds"]:
continue
A = metrics[key]
if isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor):
A = val.item()
A = F"""{key}: {val:.6f}\n"""
writer.write(__SCREAMING_SNAKE_CASE)
if not save_generations:
return
if "preds" in metrics:
A = "\n".join(metrics["preds"])
generations_file.open("w+").write(__SCREAMING_SNAKE_CASE)
@rank_zero_only
def SCREAMING_SNAKE_CASE__ (self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple):
try:
A = pl_module.model.model.num_parameters()
except AttributeError:
A = pl_module.model.num_parameters()
A = count_trainable_parameters(__SCREAMING_SNAKE_CASE)
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6})
@rank_zero_only
def SCREAMING_SNAKE_CASE__ (self : Optional[Any] , __SCREAMING_SNAKE_CASE : pl.Trainer , __SCREAMING_SNAKE_CASE : pl.LightningModule):
save_json(pl_module.metrics , pl_module.metrics_save_path)
return self._write_logs(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , "test")
@rank_zero_only
def SCREAMING_SNAKE_CASE__ (self : Optional[int] , __SCREAMING_SNAKE_CASE : pl.Trainer , __SCREAMING_SNAKE_CASE : Optional[Any]):
save_json(pl_module.metrics , pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 356 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 57 | 0 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
UpperCAmelCase = '''3'''
print('''Python version:''', sys.version)
print('''transformers version:''', transformers.__version__)
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
print('''NCCL version:''', torch.cuda.nccl.version())
except ImportError:
print('''Torch version:''', None)
try:
import deepspeed
print('''DeepSpeed version:''', deepspeed.__version__)
except ImportError:
print('''DeepSpeed version:''', None)
try:
import tensorflow as tf
print('''TensorFlow version:''', tf.__version__)
print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU''')))
print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU''')))
except ImportError:
print('''TensorFlow version:''', None)
| 195 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCAmelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : str = ["""pixel_values"""]
def __init__( self , snake_case = True , snake_case = None , snake_case = PILImageResampling.BICUBIC , snake_case = True , snake_case = None , snake_case = True , snake_case = 1 / 255 , snake_case = True , snake_case = None , snake_case = None , snake_case = True , **snake_case , ):
super().__init__(**snake_case )
lowercase = size if size is not None else {'shortest_edge': 224}
lowercase = get_size_dict(snake_case , default_to_square=snake_case )
lowercase = crop_size if crop_size is not None else {'height': 224, 'width': 224}
lowercase = get_size_dict(snake_case , default_to_square=snake_case , param_name='crop_size' )
lowercase = do_resize
lowercase = size
lowercase = resample
lowercase = do_center_crop
lowercase = crop_size
lowercase = do_rescale
lowercase = rescale_factor
lowercase = do_normalize
lowercase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowercase = image_std if image_std is not None else OPENAI_CLIP_STD
lowercase = do_convert_rgb
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case = PILImageResampling.BICUBIC , snake_case = None , **snake_case , ):
lowercase = get_size_dict(snake_case , default_to_square=snake_case )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
lowercase = get_resize_output_image_size(snake_case , size=size['shortest_edge'] , default_to_square=snake_case )
return resize(snake_case , size=snake_case , resample=snake_case , data_format=snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case = None , **snake_case , ):
lowercase = get_size_dict(snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(snake_case , size=(size['height'], size['width']) , data_format=snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case = None , **snake_case , ):
return rescale(snake_case , scale=snake_case , data_format=snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case = None , **snake_case , ):
return normalize(snake_case , mean=snake_case , std=snake_case , data_format=snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = ChannelDimension.FIRST , **snake_case , ):
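        # resolve every preprocessing argument, falling back to the values configured at construction time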
lowercase = do_resize if do_resize is not None else self.do_resize
lowercase = size if size is not None else self.size
lowercase = get_size_dict(snake_case , param_name='size' , default_to_square=snake_case )
lowercase = resample if resample is not None else self.resample
lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase = crop_size if crop_size is not None else self.crop_size
lowercase = get_size_dict(snake_case , param_name='crop_size' , default_to_square=snake_case )
lowercase = do_rescale if do_rescale is not None else self.do_rescale
lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase = do_normalize if do_normalize is not None else self.do_normalize
lowercase = image_mean if image_mean is not None else self.image_mean
lowercase = image_std if image_std is not None else self.image_std
lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase = make_list_of_images(snake_case )
if not valid_images(snake_case ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase = [convert_to_rgb(snake_case ) for image in images]
# All transformations expect numpy arrays.
lowercase = [to_numpy_array(snake_case ) for image in images]
if do_resize:
lowercase = [self.resize(image=snake_case , size=snake_case , resample=snake_case ) for image in images]
if do_center_crop:
lowercase = [self.center_crop(image=snake_case , size=snake_case ) for image in images]
if do_rescale:
lowercase = [self.rescale(image=snake_case , scale=snake_case ) for image in images]
if do_normalize:
lowercase = [self.normalize(image=snake_case , mean=snake_case , std=snake_case ) for image in images]
lowercase = [to_channel_dimension_format(snake_case , snake_case ) for image in images]
lowercase = {'pixel_values': images}
return BatchFeature(data=snake_case , tensor_type=snake_case )
| 195 | 1 |
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class _snake_case ( enum.Enum ):
snake_case__ = 0
snake_case__ = 1
snake_case__ = 2
@add_end_docstrings(a__ )
class _snake_case ( a__ ):
snake_case__ = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self : Tuple , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[Any] ):
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
__lowerCamelCase : Any = None
if self.model.config.prefix is not None:
__lowerCamelCase : Any = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
__lowerCamelCase : Optional[int] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
__lowerCamelCase : Optional[Any] = self._sanitize_parameters(prefix=UpperCAmelCase , **self._forward_params )
__lowerCamelCase : Optional[int] = {**self._preprocess_params, **preprocess_params}
__lowerCamelCase : Tuple = {**self._forward_params, **forward_params}
def lowerCamelCase__ ( self : str , UpperCAmelCase : Tuple=None , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Dict=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : str=None , UpperCAmelCase : int=None , UpperCAmelCase : Optional[Any]=None , **UpperCAmelCase : str , ):
__lowerCamelCase : Union[str, Any] = {}
if prefix is not None:
__lowerCamelCase : Dict = prefix
if prefix:
__lowerCamelCase : Tuple = self.tokenizer(
UpperCAmelCase , padding=UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_tensors=self.framework )
__lowerCamelCase : Tuple = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
" [None, 'hole']" )
__lowerCamelCase : List[Any] = handle_long_generation
preprocess_params.update(UpperCAmelCase )
__lowerCamelCase : Optional[Any] = generate_kwargs
__lowerCamelCase : str = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
__lowerCamelCase : List[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
__lowerCamelCase : Optional[Any] = ReturnType.TENSORS
if return_type is not None:
__lowerCamelCase : Dict = return_type
if clean_up_tokenization_spaces is not None:
__lowerCamelCase : Optional[Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
__lowerCamelCase : Any = self.tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
if len(UpperCAmelCase ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
__lowerCamelCase : Union[str, Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowerCamelCase__ ( self : int , *UpperCAmelCase : List[Any] , **UpperCAmelCase : int ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True} )
return super()._parse_and_tokenize(*UpperCAmelCase , **UpperCAmelCase )
def __call__( self : Dict , UpperCAmelCase : List[str] , **UpperCAmelCase : List[Any] ):
return super().__call__(UpperCAmelCase , **UpperCAmelCase )
def lowerCamelCase__ ( self : str , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any="" , UpperCAmelCase : Optional[int]=None , **UpperCAmelCase : Optional[Any] ):
__lowerCamelCase : Dict = self.tokenizer(
prefix + prompt_text , padding=UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_tensors=self.framework )
__lowerCamelCase : Any = prompt_text
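        # "hole" mode: keep only the most recent prompt tokens so that prompt + generated tokens fit the model's max length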
if handle_long_generation == "hole":
__lowerCamelCase : List[str] = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
__lowerCamelCase : str = generate_kwargs["max_new_tokens"]
else:
__lowerCamelCase : Optional[Any] = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
__lowerCamelCase : Dict = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
__lowerCamelCase : Optional[int] = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
__lowerCamelCase : Union[str, Any] = inputs["attention_mask"][:, -keep_length:]
return inputs
def lowerCamelCase__ ( self : Any , UpperCAmelCase : Dict , **UpperCAmelCase : Tuple ):
__lowerCamelCase : str = model_inputs["input_ids"]
__lowerCamelCase : List[Any] = model_inputs.get("attention_mask" , UpperCAmelCase )
# Allow empty prompts
if input_ids.shape[1] == 0:
__lowerCamelCase : Union[str, Any] = None
__lowerCamelCase : Optional[Any] = None
__lowerCamelCase : Dict = 1
else:
__lowerCamelCase : List[str] = input_ids.shape[0]
__lowerCamelCase : str = model_inputs.pop("prompt_text" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
__lowerCamelCase : int = generate_kwargs.pop("prefix_length" , 0 )
if prefix_length > 0:
__lowerCamelCase : Optional[int] = "max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
__lowerCamelCase : Tuple = generate_kwargs.get("max_length" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
__lowerCamelCase : Union[str, Any] = "min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
__lowerCamelCase : int = self.model.generate(input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , **UpperCAmelCase )
__lowerCamelCase : Optional[int] = generated_sequence.shape[0]
if self.framework == "pt":
__lowerCamelCase : Any = generated_sequence.reshape(UpperCAmelCase , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
__lowerCamelCase : Optional[int] = tf.reshape(UpperCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def lowerCamelCase__ ( self : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str]=ReturnType.FULL_TEXT , UpperCAmelCase : Optional[Any]=True ):
__lowerCamelCase : str = model_outputs["generated_sequence"][0]
__lowerCamelCase : Dict = model_outputs["input_ids"]
__lowerCamelCase : Tuple = model_outputs["prompt_text"]
__lowerCamelCase : Optional[Any] = generated_sequence.numpy().tolist()
__lowerCamelCase : int = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
__lowerCamelCase : Optional[Any] = {"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
__lowerCamelCase : List[Any] = self.tokenizer.decode(
UpperCAmelCase , skip_special_tokens=UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase , )
                # Remove the padding prompt from the sequence if an XLNet or Transfo-XL model is used
if input_ids is None:
__lowerCamelCase : Dict = 0
else:
__lowerCamelCase : str = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase , ) )
if return_type == ReturnType.FULL_TEXT:
__lowerCamelCase : Union[str, Any] = prompt_text + text[prompt_length:]
else:
__lowerCamelCase : Union[str, Any] = text[prompt_length:]
__lowerCamelCase : Dict = {"generated_text": all_text}
records.append(UpperCAmelCase )
        return records
| 350 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class _snake_case ( a__ ):
snake_case__ = "visual_bert"
def __init__( self : int , UpperCAmelCase : Any=30522 , UpperCAmelCase : Tuple=768 , UpperCAmelCase : List[str]=512 , UpperCAmelCase : List[str]=12 , UpperCAmelCase : Tuple=12 , UpperCAmelCase : Any=3072 , UpperCAmelCase : List[str]="gelu" , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Any=512 , UpperCAmelCase : Dict=2 , UpperCAmelCase : int=0.0_2 , UpperCAmelCase : Dict=1E-12 , UpperCAmelCase : List[str]=False , UpperCAmelCase : Tuple=True , UpperCAmelCase : Optional[Any]=1 , UpperCAmelCase : Union[str, Any]=0 , UpperCAmelCase : List[str]=2 , **UpperCAmelCase : str , ):
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
__lowerCamelCase : Optional[int] = vocab_size
__lowerCamelCase : str = max_position_embeddings
__lowerCamelCase : str = hidden_size
__lowerCamelCase : Union[str, Any] = visual_embedding_dim
__lowerCamelCase : Any = num_hidden_layers
__lowerCamelCase : Union[str, Any] = num_attention_heads
__lowerCamelCase : Optional[Any] = intermediate_size
__lowerCamelCase : List[Any] = hidden_act
__lowerCamelCase : Optional[int] = hidden_dropout_prob
__lowerCamelCase : str = attention_probs_dropout_prob
__lowerCamelCase : List[Any] = initializer_range
__lowerCamelCase : List[str] = type_vocab_size
__lowerCamelCase : str = layer_norm_eps
__lowerCamelCase : List[str] = bypass_transformer
        __lowerCamelCase : Optional[int] = special_visual_initialize
| 64 | 0 |
import os
def _snake_case( ) -> int:
    with open(os.path.dirname(__file__ ) + """/grid.txt""" ) as f:
lowercase : List[Any] = [] # noqa: E741
for _ in range(20 ):
l.append([int(_lowerCamelCase ) for x in f.readline().split()] )
lowercase : Tuple = 0
# right
for i in range(20 ):
for j in range(17 ):
lowercase : str = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
lowercase : str = temp
# down
for i in range(17 ):
for j in range(20 ):
lowercase : Optional[Any] = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
lowercase : List[Any] = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
lowercase : Any = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
lowercase : int = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
lowercase : Dict = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
lowercase : List[str] = temp
return maximum
if __name__ == "__main__":
print(solution())
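# A minimal, self-contained sketch of the same scan on a tiny grid (added for
# illustration; the 4x4 grid and the window length of 2 below are arbitrary
# choices, not part of the original problem):
def _tiny_grid_demo() -> int:
    grid = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
    best = 0
    n, k = len(grid), 2  # window length of 2 instead of 4
    for i in range(n):
        for j in range(n - k + 1):
            best = max(best, grid[i][j] * grid[i][j + 1])  # right
            best = max(best, grid[j][i] * grid[j + 1][i])  # down
    for i in range(n - k + 1):
        for j in range(n - k + 1):
            best = max(best, grid[i][j] * grid[i + 1][j + 1])  # diagonal
    return best  # 15 * 16 = 240 for this grid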
| 20 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase : int = logging.get_logger(__name__)
lowercase : Union[str, Any] = '▁'
lowercase : Tuple = {'vocab_file': 'spiece.model'}
lowercase : Dict = {
'vocab_file': {
'google/reformer-crime-and-punishment': (
'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
)
}
}
lowercase : Any = {
'google/reformer-crime-and-punishment': 524288,
}
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = ['input_ids', 'attention_mask']
def __init__( self :int , a :List[Any] , a :Tuple="</s>" , a :str="<unk>" , a :Dict=[] , a :Optional[Dict[str, Any]] = None , **a :Union[str, Any] , ) -> None:
__UpperCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=a , unk_token=a , additional_special_tokens=a , sp_model_kwargs=self.sp_model_kwargs , **a , )
__UpperCamelCase : Optional[Any] = vocab_file
__UpperCamelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a )
@property
def _lowerCamelCase ( self :Optional[Any] ) -> Any:
return self.sp_model.get_piece_size()
def _lowerCamelCase ( self :Optional[int] ) -> Dict[str, int]:
__UpperCamelCase : str = {self.convert_ids_to_tokens(a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self :str ) -> List[str]:
__UpperCamelCase : Union[str, Any] = self.__dict__.copy()
__UpperCamelCase : Optional[Any] = None
return state
def __setstate__( self :int , a :List[str] ) -> int:
__UpperCamelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__UpperCamelCase : int = {}
__UpperCamelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCamelCase ( self :List[Any] , a :str ) -> List[str]:
return self.sp_model.encode(a , out_type=a )
def _lowerCamelCase ( self :Optional[int] , a :Optional[Any] ) -> str:
return self.sp_model.piece_to_id(a )
def _lowerCamelCase ( self :Dict , a :Union[str, Any] ) -> Optional[int]:
if index < self.sp_model.get_piece_size():
__UpperCamelCase : Optional[int] = self.sp_model.IdToPiece(a )
return token
def _lowerCamelCase ( self :Dict , a :List[Any] ) -> Dict:
__UpperCamelCase : Optional[int] = []
__UpperCamelCase : str = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(a ) + token
__UpperCamelCase : List[Any] = []
else:
current_sub_tokens.append(a )
out_string += self.sp_model.decode(a )
return out_string.strip()
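    # Illustrative note (not in the original file): decoding is chunked so that
    # special tokens pass through verbatim. For instance, for the (assumed)
    # pieces ["▁hello", "</s>", "▁world"], "▁hello" is decoded by SentencePiece,
    # "</s>" is appended as-is, and decoding then restarts for "▁world".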
def _lowerCamelCase ( self :Optional[Any] , a :str , a :Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__UpperCamelCase : List[Any] = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a )
elif not os.path.isfile(self.vocab_file ):
with open(a , "wb" ) as fi:
__UpperCamelCase : int = self.sp_model.serialized_model_proto()
fi.write(a )
        return (out_vocab_file,)
| 232 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
UpperCamelCase = 'Create a default config file for Accelerate with only a few flags set.'
def lowercase_ ( _lowerCamelCase : Optional[int]="no" , _lowerCamelCase : str = default_json_config_file , _lowerCamelCase : bool = False):
lowercase__ : List[str] = Path(lowerCamelCase_)
path.parent.mkdir(parents=lowerCamelCase_ , exist_ok=lowerCamelCase_)
if path.exists():
print(
f'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''')
return False
lowercase__ : Optional[int] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
f'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''')
lowercase__ : Union[str, Any] = {
'''compute_environment''': '''LOCAL_MACHINE''',
'''mixed_precision''': mixed_precision,
}
if torch.cuda.is_available():
lowercase__ : Optional[int] = torch.cuda.device_count()
lowercase__ : Tuple = num_gpus
lowercase__ : List[Any] = False
if num_gpus > 1:
lowercase__ : Any = '''MULTI_GPU'''
else:
lowercase__ : Optional[int] = '''NO'''
elif is_xpu_available() and use_xpu:
lowercase__ : Any = torch.xpu.device_count()
lowercase__ : str = num_xpus
lowercase__ : Union[str, Any] = False
if num_xpus > 1:
lowercase__ : Optional[int] = '''MULTI_XPU'''
else:
lowercase__ : List[str] = '''NO'''
elif is_npu_available():
lowercase__ : List[str] = torch.npu.device_count()
lowercase__ : Optional[int] = num_npus
lowercase__ : Union[str, Any] = False
if num_npus > 1:
lowercase__ : Tuple = '''MULTI_NPU'''
else:
lowercase__ : Any = '''NO'''
else:
lowercase__ : Dict = 0
lowercase__ : Dict = True
lowercase__ : Tuple = 1
lowercase__ : int = '''NO'''
lowercase__ : List[str] = ClusterConfig(**lowerCamelCase_)
config.to_json_file(lowerCamelCase_)
return path
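# A minimal usage sketch (hedged: the parameter names follow the body above,
# not an official example):
#
#     write_basic_config(mixed_precision="bf16", save_location="/tmp/accel.json")
#
# returns the written path on success, or False when a config file already
# exists at the target location.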
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any]):
lowercase__ : Any = parser.add_parser("default" , parents=lowerCamelCase_ , help=lowerCamelCase_ , formatter_class=lowerCamelCase_)
parser.add_argument(
"--config_file" , default=lowerCamelCase_ , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have "
"such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed "
"with \'huggingface\'."
) , dest="save_location" , )
parser.add_argument(
"--mixed_precision" , choices=["no", "fp16", "bf16"] , type=lowerCamelCase_ , help="Whether or not to use mixed precision training. "
"Choose between FP16 and BF16 (bfloat16) training. "
"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later." , default="no" , )
parser.set_defaults(func=lowerCamelCase_)
return parser
def lowercase_ ( _lowerCamelCase : Dict):
lowercase__ : Optional[Any] = write_basic_config(args.mixed_precision , args.save_location)
if config_file:
print(f'''accelerate configuration saved at {config_file}''')
| 356 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__A )
class snake_case_ ( __A ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__A : str = field(default="text-classification" ,metadata={"include_in_asdict_even_if_is_default": True} )
__A : ClassVar[Features] = Features({"text": Value("string" )} )
__A : ClassVar[Features] = Features({"labels": ClassLabel} )
__A : str = "text"
__A : str = "labels"
def __UpperCamelCase ( self : Dict , lowercase_ : Optional[Any] ) -> int:
if self.label_column not in features:
raise ValueError(F'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , lowercase_ ):
raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
lowercase__ : Optional[int] = copy.deepcopy(self )
lowercase__ : Tuple = self.label_schema.copy()
lowercase__ : Union[str, Any] = features[self.label_column]
lowercase__ : int = label_schema
return task_template
@property
def __UpperCamelCase ( self : Optional[Any] ) -> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
}
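    # Illustrative sketch (the dataset columns are invented for the example):
    # given features {"review": Value("string"), "star": ClassLabel(...)}, a
    # template with text_column="review" and label_column="star" would copy the
    # ClassLabel into its label schema via the method above and expose
    # {"review": "text", "star": "labels"} through this mapping property.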
| 333 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"""facebook/deit-base-distilled-patch16-224""": (
"""https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"""
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class _lowerCamelCase ( a_ ):
_lowerCamelCase :int = "deit"
def __init__( self : Union[str, Any] , UpperCamelCase : Union[str, Any]=7_68 , UpperCamelCase : str=12 , UpperCamelCase : List[Any]=12 , UpperCamelCase : Any=30_72 , UpperCamelCase : Optional[Any]="gelu" , UpperCamelCase : List[str]=0.0 , UpperCamelCase : Union[str, Any]=0.0 , UpperCamelCase : Union[str, Any]=0.02 , UpperCamelCase : List[Any]=1E-1_2 , UpperCamelCase : List[str]=2_24 , UpperCamelCase : List[str]=16 , UpperCamelCase : Optional[int]=3 , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Optional[Any]=16 , **UpperCamelCase : List[str] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**UpperCamelCase )
lowerCAmelCase__ : str = hidden_size
lowerCAmelCase__ : Optional[int] = num_hidden_layers
lowerCAmelCase__ : Any = num_attention_heads
lowerCAmelCase__ : str = intermediate_size
lowerCAmelCase__ : Union[str, Any] = hidden_act
lowerCAmelCase__ : Dict = hidden_dropout_prob
lowerCAmelCase__ : int = attention_probs_dropout_prob
lowerCAmelCase__ : List[Any] = initializer_range
lowerCAmelCase__ : str = layer_norm_eps
lowerCAmelCase__ : Tuple = image_size
lowerCAmelCase__ : Any = patch_size
lowerCAmelCase__ : Dict = num_channels
lowerCAmelCase__ : List[str] = qkv_bias
lowerCAmelCase__ : str = encoder_stride
class _lowerCamelCase ( a_ ):
_lowerCamelCase :List[str] = version.parse("1.11" )
@property
def _lowerCAmelCase ( self : int ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _lowerCAmelCase ( self : List[str] ) -> float:
"""simple docstring"""
return 1E-4
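    # Illustrative note (not from the original file): an ONNX exporter would use
    # the two properties above together, feeding a dummy tensor shaped
    # (batch, num_channels, height, width) as pixel_values and comparing the
    # ONNX and PyTorch outputs with an absolute tolerance of 1e-4.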
| 242 |
"""simple docstring"""
from typing import Any
import numpy as np
def lowercase_ ( __UpperCAmelCase ) -> bool:
return np.array_equal(__UpperCAmelCase , matrix.conjugate().T )
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Any:
lowerCAmelCase__ : Optional[int] = v.conjugate().T
lowerCAmelCase__ : Optional[int] = v_star.dot(__UpperCAmelCase )
assert isinstance(__UpperCAmelCase , np.ndarray )
return (v_star_dot.dot(__UpperCAmelCase )) / (v_star.dot(__UpperCAmelCase ))
def lowercase_ ( ) -> None:
lowerCAmelCase__ : Union[str, Any] = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
lowerCAmelCase__ : List[str] = np.array([[1], [2], [3]] )
assert is_hermitian(__UpperCAmelCase ), f"""{a} is not hermitian."""
print(rayleigh_quotient(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ : Union[str, Any] = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(__UpperCAmelCase ), f"""{a} is not hermitian."""
assert rayleigh_quotient(__UpperCAmelCase , __UpperCAmelCase ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
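# A small self-contained check (added for illustration; the matrix and vector
# below are arbitrary): for an eigenvector v of a Hermitian matrix M, the
# Rayleigh quotient v*Mv / (v*v) recovers the corresponding eigenvalue.
def _rayleigh_demo() -> None:
    m = np.array([[2.0, 0.0], [0.0, 5.0]])
    v = np.array([[0.0], [1.0]])  # eigenvector of m for eigenvalue 5
    quotient = (v.conjugate().T @ m @ v) / (v.conjugate().T @ v)
    assert quotient.item() == 5.0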
| 242 | 1 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
__A = ['bert-base-uncased', 'bert-base-cased']
__A = 'hf-internal-testing/tiny-bert-tf-only'
if is_tf_available():
class lowerCamelCase__ ( tf.keras.Model ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase =tokenizer
_lowerCAmelCase =AutoConfig.from_pretrained(__UpperCAmelCase )
_lowerCAmelCase =TFAutoModel.from_config(__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Any:
_lowerCAmelCase =self.tokenizer(__UpperCAmelCase )
_lowerCAmelCase =self.bert(**__UpperCAmelCase )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Optional[Any]:
super().setUp()
_lowerCAmelCase =[
BertTokenizer.from_pretrained(__UpperCAmelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
_lowerCAmelCase =[TFBertTokenizer.from_pretrained(__UpperCAmelCase ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(__UpperCAmelCase , use_fast_bert_tokenizer=__UpperCAmelCase )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
_lowerCAmelCase =[
"""This is a straightforward English test sentence.""",
"""This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
"""Now we're going to add some Chinese: 一 二 三 一二三""",
"""And some much more rare Chinese: 齉 堃 齉堃""",
"""Je vais aussi écrire en français pour tester les accents""",
"""Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
]
_lowerCAmelCase =list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def _lowerCAmelCase ( self ) -> str:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
_lowerCAmelCase =tokenizer(__UpperCAmelCase , return_tensors="""tf""" , padding="""longest""" )
_lowerCAmelCase =tf_tokenizer(__UpperCAmelCase )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def _lowerCAmelCase ( self ) -> Union[str, Any]:
for tf_tokenizer in self.tf_tokenizers:
_lowerCAmelCase =tf_tokenizer(self.paired_sentences )
_lowerCAmelCase =tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def _lowerCAmelCase ( self ) -> Union[str, Any]:
for tf_tokenizer in self.tf_tokenizers:
_lowerCAmelCase =tf.function(__UpperCAmelCase )
for test_inputs in (self.test_sentences, self.paired_sentences):
_lowerCAmelCase =tf.constant(__UpperCAmelCase )
_lowerCAmelCase =compiled_tokenizer(__UpperCAmelCase )
_lowerCAmelCase =tf_tokenizer(__UpperCAmelCase )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def _lowerCAmelCase ( self ) -> List[Any]:
for tf_tokenizer in self.tf_tokenizers:
_lowerCAmelCase =ModelToSave(tokenizer=__UpperCAmelCase )
_lowerCAmelCase =tf.convert_to_tensor(self.test_sentences )
_lowerCAmelCase =model(__UpperCAmelCase ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_lowerCAmelCase =Path(__UpperCAmelCase ) / """saved.model"""
model.save(__UpperCAmelCase )
_lowerCAmelCase =tf.keras.models.load_model(__UpperCAmelCase )
_lowerCAmelCase =loaded_model(__UpperCAmelCase )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1e-5 )
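    # Illustrative note (not part of the original test): because TFBertTokenizer
    # runs inside the TF graph, the SavedModel exported above accepts raw strings
    # directly, e.g.
    #     loaded_model(tf.constant(["a brand new sentence"]))
    # with no Python-side tokenization step at serving time.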
| 362 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
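# Illustrative sketch (the attribute access below is only an example):
# _LazyModule defers the imports declared in _import_structure until first
# attribute access, so
#     from transformers.models.m2m_100 import M2M100Model
# loads the torch-backed modeling file only at that point, keeping the
# top-level package import cheap.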
| 341 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = KandinskyInpaintPipeline
__UpperCamelCase = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
__UpperCamelCase = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
__UpperCamelCase = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__UpperCamelCase = False
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
return 32
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
return 32
@property
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
return self.time_input_dim
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
return 100
@property
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''')
return tokenizer
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : str = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
SCREAMING_SNAKE_CASE_ : Any = MultilingualCLIP(lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = text_encoder.eval()
return text_encoder
@property
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : List[Any] = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE_ : List[Any] = UNetaDConditionModel(**lowercase_)
return model
@property
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : Optional[int] = VQModel(**self.dummy_movq_kwargs)
return model
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ : Optional[int] = self.dummy_tokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = self.dummy_unet
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.dummy_movq
SCREAMING_SNAKE_CASE_ : List[Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='''linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=lowercase_ , )
SCREAMING_SNAKE_CASE_ : List[Any] = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : int=0):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowercase_)).to(lowercase_)
SCREAMING_SNAKE_CASE_ : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1)).to(lowercase_)
# create init_image
SCREAMING_SNAKE_CASE_ : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_)).to(lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1)[0]
SCREAMING_SNAKE_CASE_ : Optional[int] = Image.fromarray(np.uinta(lowercase_)).convert('''RGB''').resize((256, 256))
# create mask
SCREAMING_SNAKE_CASE_ : List[Any] = np.ones((64, 64) , dtype=np.floataa)
SCREAMING_SNAKE_CASE_ : Any = 0
if str(lowercase_).startswith('''mps'''):
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.manual_seed(lowercase_)
else:
SCREAMING_SNAKE_CASE_ : int = torch.Generator(device=lowercase_).manual_seed(lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = '''cpu'''
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : int = self.pipeline_class(**lowercase_)
SCREAMING_SNAKE_CASE_ : str = pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(**self.get_dummy_inputs(lowercase_))
SCREAMING_SNAKE_CASE_ : Union[str, Any] = output.images
SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(
**self.get_dummy_inputs(lowercase_) , return_dict=lowercase_ , )[0]
SCREAMING_SNAKE_CASE_ : int = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : str = image_from_tuple[0, -3:, -3:, -1]
print(F'image.shape {image.shape}')
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_ : str = np.array(
[0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''')
SCREAMING_SNAKE_CASE_ : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''')
SCREAMING_SNAKE_CASE_ : Tuple = np.ones((768, 768) , dtype=np.floataa)
SCREAMING_SNAKE_CASE_ : str = 0
SCREAMING_SNAKE_CASE_ : Tuple = '''a hat'''
SCREAMING_SNAKE_CASE_ : List[str] = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa)
pipe_prior.to(lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.floataa)
SCREAMING_SNAKE_CASE_ : List[str] = pipeline.to(lowercase_)
pipeline.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Generator(device='''cpu''').manual_seed(0)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = pipe_prior(
lowercase_ , generator=lowercase_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE_ : str = pipeline(
lowercase_ , image=lowercase_ , mask_image=lowercase_ , image_embeds=lowercase_ , negative_image_embeds=lowercase_ , generator=lowercase_ , num_inference_steps=100 , height=768 , width=768 , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ : Tuple = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_)
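        # Illustrative note (not part of the original test): the two-stage call
        # above follows the usual Kandinsky pattern, where the prior maps the
        # text prompt to CLIP image embeddings and the inpainting decoder then
        # consumes those embeddings together with the image and mask to produce
        # the final 768x768 output.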
| 91 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCamelCase ( lowerCAmelCase ):
def __init__( self :int , lowerCamelCase :AutoencoderKL , lowerCamelCase :CLIPTextModel , lowerCamelCase :CLIPTokenizer , lowerCamelCase :UNetaDConditionModel , lowerCamelCase :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCamelCase :StableDiffusionSafetyChecker , lowerCamelCase :CLIPImageProcessor , ) -> Optional[int]:
super().__init__()
self.register_modules(
vae=lowerCamelCase , text_encoder=lowerCamelCase , tokenizer=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase , safety_checker=lowerCamelCase , feature_extractor=lowerCamelCase , )
def UpperCAmelCase_ ( self :Tuple , lowerCamelCase :Optional[Union[str, int]] = "auto" ) -> int:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase )
def UpperCAmelCase_ ( self :Optional[int] ) -> Union[str, Any]:
self.enable_attention_slicing(lowerCamelCase )
@torch.no_grad()
def __call__( self :int , lowerCamelCase :Union[str, List[str]] , lowerCamelCase :int = 512 , lowerCamelCase :int = 512 , lowerCamelCase :int = 50 , lowerCamelCase :float = 7.5 , lowerCamelCase :Optional[Union[str, List[str]]] = None , lowerCamelCase :Optional[int] = 1 , lowerCamelCase :float = 0.0 , lowerCamelCase :Optional[torch.Generator] = None , lowerCamelCase :Optional[torch.FloatTensor] = None , lowerCamelCase :Optional[str] = "pil" , lowerCamelCase :bool = True , lowerCamelCase :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase :int = 1 , lowerCamelCase :Optional[torch.FloatTensor] = None , **lowerCamelCase :List[str] , ) -> str:
if isinstance(lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = 1
elif isinstance(lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = len(lowerCamelCase )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCamelCase , lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(lowerCamelCase )}.''' )
# get prompt text embeddings
UpperCAmelCase__ = self.tokenizer(
lowerCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
UpperCAmelCase__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCAmelCase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCAmelCase__ = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
UpperCAmelCase__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = text_embeddings.shape
UpperCAmelCase__ = text_embeddings.repeat(1 , lowerCamelCase , 1 )
UpperCAmelCase__ = text_embeddings.view(bs_embed * num_images_per_prompt , lowerCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCAmelCase__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCAmelCase__ = 42
if negative_prompt is None:
UpperCAmelCase__ = [""]
elif type(lowerCamelCase ) is not type(lowerCamelCase ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase )} !='''
f''' {type(lowerCamelCase )}.''' )
elif isinstance(lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = [negative_prompt]
elif batch_size != len(lowerCamelCase ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
" the batch size of `prompt`." )
else:
UpperCAmelCase__ = negative_prompt
UpperCAmelCase__ = text_input_ids.shape[-1]
UpperCAmelCase__ = self.tokenizer(
lowerCamelCase , padding="max_length" , max_length=lowerCamelCase , truncation=lowerCamelCase , return_tensors="pt" , )
UpperCAmelCase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase__ = uncond_embeddings.shape[1]
UpperCAmelCase__ = uncond_embeddings.repeat(lowerCamelCase , lowerCamelCase , 1 )
UpperCAmelCase__ = uncond_embeddings.view(batch_size * num_images_per_prompt , lowerCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCAmelCase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCAmelCase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
UpperCAmelCase__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCAmelCase__ = torch.randn(
lowerCamelCase , generator=lowerCamelCase , device="cpu" , dtype=lowerCamelCase ).to(self.device )
UpperCAmelCase__ = torch.randn(lowerCamelCase , generator=lowerCamelCase , device="cpu" , dtype=lowerCamelCase ).to(
self.device )
else:
UpperCAmelCase__ = torch.randn(
lowerCamelCase , generator=lowerCamelCase , device=self.device , dtype=lowerCamelCase )
UpperCAmelCase__ = torch.randn(lowerCamelCase , generator=lowerCamelCase , device=self.device , dtype=lowerCamelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
UpperCAmelCase__ = latents_reference.to(self.device )
UpperCAmelCase__ = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
UpperCAmelCase__ = (latents_shape[3] - latents_shape_reference[3]) // 2
UpperCAmelCase__ = (latents_shape[2] - latents_shape_reference[2]) // 2
UpperCAmelCase__ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
UpperCAmelCase__ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
UpperCAmelCase__ = 0 if dx < 0 else dx
UpperCAmelCase__ = 0 if dy < 0 else dy
UpperCAmelCase__ = max(-dx , 0 )
UpperCAmelCase__ = max(-dy , 0 )
UpperCAmelCase__ = latents_reference[:, :, dy : dy + h, dx : dx + w]
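                # Illustrative worked example (the sizes are assumptions, not
                # from the original): for a 768x768 request with a 512x512
                # reference, the latent grids are 96x96 and 64x64, so
                # dx = dy = (96 - 64) // 2 = 16 and the 64x64 reference block is
                # placed in the centered window of the new latents, reusing the
                # reference noise there.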
# set timesteps
self.scheduler.set_timesteps(lowerCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCAmelCase__ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCAmelCase__ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase__ = {}
if accepts_eta:
UpperCAmelCase__ = eta
for i, t in enumerate(self.progress_bar(lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase__ = self.scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
# predict the noise residual
UpperCAmelCase__ = self.unet(lowerCamelCase , lowerCamelCase , encoder_hidden_states=lowerCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCAmelCase__ , UpperCAmelCase__ = noise_pred.chunk(2 )
UpperCAmelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
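                # Illustrative arithmetic (values invented): with guidance_scale
                # 7.5, each entry becomes u + 7.5 * (t - u); e.g. u = 0.2 and
                # t = 0.3 give 0.2 + 7.5 * 0.1 = 0.95, pushing the update toward
                # the text-conditioned prediction.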
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase__ = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = 1 / 0.1_82_15 * latents
UpperCAmelCase__ = self.vae.decode(lowerCamelCase ).sample
UpperCAmelCase__ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCAmelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
UpperCAmelCase__ = self.feature_extractor(self.numpy_to_pil(lowerCamelCase ) , return_tensors="pt" ).to(
self.device )
UpperCAmelCase__ , UpperCAmelCase__ = self.safety_checker(
images=lowerCamelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
UpperCAmelCase__ = None
if output_type == "pil":
UpperCAmelCase__ = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=lowerCamelCase , nsfw_content_detected=lowerCamelCase )
| 169 | 0 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
A : Union[str, Any] = sys.version_info >= (3, 10)
def a__ ( __UpperCamelCase=None , __UpperCamelCase=None ):
return field(default_factory=lambda: default , metadata=__UpperCamelCase )
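# Illustrative note (not in the original file): wrapping the default in a
# factory avoids dataclasses' mutable-default error, e.g.
#     list_field(default=[1, 2, 3])
# produces field(default_factory=lambda: [1, 2, 3], ...) so each instance gets
# its own fresh list instead of sharing one.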
@dataclass
class lowerCamelCase :
"""simple docstring"""
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
@dataclass
class lowerCamelCase :
"""simple docstring"""
lowerCamelCase__ = 4_2
lowerCamelCase__ = field(default='''toto''' , metadata={'''help''': '''help message'''} )
@dataclass
class lowerCamelCase :
"""simple docstring"""
lowerCamelCase__ = False
lowerCamelCase__ = True
lowerCamelCase__ = None
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''titi'''
lowerCamelCase__ = '''toto'''
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = '''titi'''
lowerCamelCase__ = '''toto'''
lowerCamelCase__ = 4_2
@dataclass
class lowerCamelCase :
"""simple docstring"""
lowerCamelCase__ = "toto"
def __A ( self : str ) -> int:
SCREAMING_SNAKE_CASE_ = BasicEnum(self.foo )
@dataclass
class lowerCamelCase :
"""simple docstring"""
lowerCamelCase__ = "toto"
def __A ( self : Optional[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = MixedTypeEnum(self.foo )
@dataclass
class lowerCamelCase :
"""simple docstring"""
lowerCamelCase__ = None
lowerCamelCase__ = field(default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''help message'''} )
lowerCamelCase__ = None
lowerCamelCase__ = list_field(default=[] )
lowerCamelCase__ = list_field(default=[] )
@dataclass
class lowerCamelCase :
"""simple docstring"""
lowerCamelCase__ = list_field(default=[] )
lowerCamelCase__ = list_field(default=[1, 2, 3] )
lowerCamelCase__ = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
lowerCamelCase__ = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class lowerCamelCase :
"""simple docstring"""
lowerCamelCase__ = field()
lowerCamelCase__ = field()
lowerCamelCase__ = field()
def __A ( self : Optional[int] ) -> Dict:
SCREAMING_SNAKE_CASE_ = BasicEnum(self.required_enum )
@dataclass
class lowerCamelCase :
"""simple docstring"""
lowerCamelCase__ = 42
lowerCamelCase__ = field()
lowerCamelCase__ = None
lowerCamelCase__ = field(default='''toto''' , metadata={'''help''': '''help message'''} )
lowerCamelCase__ = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
if is_python_no_less_than_3_10:
@dataclass
class lowerCamelCase :
"""simple docstring"""
lowerCamelCase__ = False
lowerCamelCase__ = True
lowerCamelCase__ = None
@dataclass
class lowerCamelCase :
"""simple docstring"""
lowerCamelCase__ = None
lowerCamelCase__ = field(default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''help message'''} )
lowerCamelCase__ = None
lowerCamelCase__ = list_field(default=[] )
lowerCamelCase__ = list_field(default=[] )
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[Any] , __magic_name__ : argparse.ArgumentParser , __magic_name__ : argparse.ArgumentParser ) -> int:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
SCREAMING_SNAKE_CASE_ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"}
SCREAMING_SNAKE_CASE_ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("choices" , __magic_name__ ) and yy.get("choices" , __magic_name__ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["type"](__magic_name__ ) , yy["type"](__magic_name__ ) )
del xx["type"], yy["type"]
self.assertEqual(__magic_name__ , __magic_name__ )
def __A ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE_ = HfArgumentParser(__magic_name__ )
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--bar" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--baz" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--flag" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
self.argparsersEqual(__magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
((SCREAMING_SNAKE_CASE_) , ) = parser.parse_args_into_dataclasses(__magic_name__ , look_for_args_file=__magic_name__ )
self.assertFalse(example.flag )
def __A ( self : List[Any] ) -> Any:
SCREAMING_SNAKE_CASE_ = HfArgumentParser(__magic_name__ )
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
expected.add_argument("--foo" , default=42 , type=__magic_name__ )
expected.add_argument("--baz" , default="toto" , type=__magic_name__ , help="help message" )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __A ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
expected.add_argument("--baz" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("--no_baz" , action="store_false" , default=__magic_name__ , dest="baz" )
expected.add_argument("--opt" , type=__magic_name__ , default=__magic_name__ )
SCREAMING_SNAKE_CASE_ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__magic_name__ )
for dataclass_type in dataclass_types:
SCREAMING_SNAKE_CASE_ = HfArgumentParser(__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = parser.parse_args([] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
SCREAMING_SNAKE_CASE_ = parser.parse_args(["--foo", "--no_baz"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
SCREAMING_SNAKE_CASE_ = parser.parse_args(["--foo", "--baz"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
SCREAMING_SNAKE_CASE_ = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
SCREAMING_SNAKE_CASE_ = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
def __A ( self : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = HfArgumentParser(__magic_name__ )
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
SCREAMING_SNAKE_CASE_ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
SCREAMING_SNAKE_CASE_ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
SCREAMING_SNAKE_CASE_ = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
SCREAMING_SNAKE_CASE_ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
SCREAMING_SNAKE_CASE_ = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __A ( self : Optional[Any] ) -> List[Any]:
@dataclass
class lowerCamelCase :
"""simple docstring"""
lowerCamelCase__ = "toto"
SCREAMING_SNAKE_CASE_ = HfArgumentParser(__magic_name__ )
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
SCREAMING_SNAKE_CASE_ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
SCREAMING_SNAKE_CASE_ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
def __A ( self : Optional[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = HfArgumentParser(__magic_name__ )
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
expected.add_argument("--foo_int" , nargs="+" , default=[] , type=__magic_name__ )
expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=__magic_name__ )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__magic_name__ )
expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = parser.parse_args([] )
self.assertEqual(
__magic_name__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , )
SCREAMING_SNAKE_CASE_ = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
self.assertEqual(__magic_name__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) )
def __A ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
expected.add_argument("--foo" , default=__magic_name__ , type=__magic_name__ )
expected.add_argument("--bar" , default=__magic_name__ , type=__magic_name__ , help="help message" )
expected.add_argument("--baz" , default=__magic_name__ , type=__magic_name__ )
expected.add_argument("--ces" , nargs="+" , default=[] , type=__magic_name__ )
expected.add_argument("--des" , nargs="+" , default=[] , type=__magic_name__ )
SCREAMING_SNAKE_CASE_ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__magic_name__ )
for dataclass_type in dataclass_types:
SCREAMING_SNAKE_CASE_ = HfArgumentParser(__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = parser.parse_args([] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , bar=__magic_name__ , baz=__magic_name__ , ces=[] , des=[] ) )
SCREAMING_SNAKE_CASE_ = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
self.assertEqual(__magic_name__ , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) )
def __A ( self : int ) -> List[str]:
SCREAMING_SNAKE_CASE_ = HfArgumentParser(__magic_name__ )
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
expected.add_argument("--required_list" , nargs="+" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--required_str" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__magic_name__ , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __A ( self : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = HfArgumentParser(__magic_name__ )
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__magic_name__ , )
expected.add_argument("--opt" , type=__magic_name__ , default=__magic_name__ )
expected.add_argument("--baz" , default="toto" , type=__magic_name__ , help="help message" )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __A ( self : List[Any] ) -> Any:
SCREAMING_SNAKE_CASE_ = HfArgumentParser(__magic_name__ )
SCREAMING_SNAKE_CASE_ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
SCREAMING_SNAKE_CASE_ = parser.parse_dict(__magic_name__ )[0]
SCREAMING_SNAKE_CASE_ = BasicExample(**__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
def __A ( self : Tuple ) -> Dict:
SCREAMING_SNAKE_CASE_ = HfArgumentParser(__magic_name__ )
SCREAMING_SNAKE_CASE_ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
"extra": 42,
}
self.assertRaises(__magic_name__ , parser.parse_dict , __magic_name__ , allow_extra_keys=__magic_name__ )
    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            # a JSON payload must go through parse_json_file, not parse_yaml_file
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
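

# --- Hedged usage sketch (not part of the test suite above): the minimal
# HfArgumentParser workflow these tests exercise end to end. `SketchArguments`
# and its field values are illustrative assumptions; `HfArgumentParser` is
# assumed to be imported at the top of this test module, as the tests require.
if __name__ == "__main__":
    from dataclasses import dataclass, field

    @dataclass
    class SketchArguments:
        foo: int = field(default=7, metadata={"help": "an integer argument"})
        bar: float = 3.14

    sketch_parser = HfArgumentParser(SketchArguments)
    # parse_dict mirrors test_parse_dict above and bypasses sys.argv entirely.
    (sketch_args,) = sketch_parser.parse_dict({"foo": 12, "bar": 2.71})
    print(sketch_args)  # SketchArguments(foo=12, bar=2.71)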
| 305 |

from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
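
# --- Usage note (hedged sketch): `_LazyModule` swaps itself into `sys.modules`,
# so the torch- and vision-dependent submodules above are imported only on
# first attribute access, e.g.:
#
#     from transformers import DPTConfig               # cheap, config only
#     from transformers import DPTForDepthEstimation   # triggers the torch import
#
# The top-level re-export paths are the usual `transformers` convention and are
# assumed here, not defined in this file.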
| 305 | 1 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """Output class for the semantic Stable Diffusion pipeline."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
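
# --- Usage note (hedged sketch): as a `BaseOutput`, the dataclass above can be
# indexed like a tuple or like a dict. Field values below are dummies chosen
# for illustration only.
#
#     out = SemanticStableDiffusionPipelineOutput(
#         images=[Image.new("RGB", (8, 8))], nsfw_content_detected=[False]
#     )
#     assert out[0] is out.images
#     assert out["nsfw_content_detected"] == [False]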
| 336 |
'''simple docstring'''
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price after applying the given tax rate."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
| 250 | 0 |
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)


@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
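

# --- Hedged sketch of what MPNetModelTester drives: a tiny random-weight MPNet
# forward pass. The config sizes below are small illustrative choices, not the
# values used by the test suite.
if __name__ == "__main__" and is_torch_available():
    demo_config = MPNetConfig(
        vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
    )
    demo_model = MPNetModel(demo_config)
    demo_model.eval()
    demo_input_ids = torch.randint(0, demo_config.vocab_size, (1, 11))
    with torch.no_grad():
        demo_out = demo_model(demo_input_ids)
    print(demo_out.last_hidden_state.shape)  # torch.Size([1, 11, 32])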
| 352 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))


class MobileViTVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
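

# --- The tester above relies on `make_divisible` to round 512 * width_multiplier
# to a multiple of 8. A hedged reimplementation of the usual MobileNet-style
# helper is sketched below for reference; the upstream function may differ in
# small details.
def make_divisible_sketch(value, divisor=8, min_value=None):
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    # Make sure rounding down does not shrink the channel count by more than 10%.
    if new_value < 0.9 * value:
        new_value += divisor
    return int(new_value)


# e.g. make_divisible_sketch(512 * 0.25) == 128, matching last_hidden_size above.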
| 11 | 0 |
import importlib.metadata
import warnings
from copy import deepcopy

from packaging import version

from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available


if is_bitsandbytes_available():
    import bitsandbytes as bnb
    import torch
    import torch.nn as nn

    from ..pytorch_utils import Conv1D

if is_accelerate_available():
    from accelerate import init_empty_weights
    from accelerate.utils import find_tied_parameters

logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """
    Set a (possibly quantized) tensor of `module` on `device`, quantizing on the
    fly when the target parameter is a bitsandbytes `Int8Params`/`Params4bit`.
    """
    # Recurse into submodules when the tensor name is dotted (e.g. "encoder.layer.0.weight").
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    """
    Private recursive helper for `replace_with_bnb_linear`; returns the model and
    whether at least one linear layer was converted.
    """
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """
    Replace all `nn.Linear`/`Conv1D` modules with bitsandbytes 8-bit or 4-bit
    linear layers, except for the modules listed in `modules_to_not_convert`.
    """
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use"
        " `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    """
    Return the module names that should stay un-quantized (tied weights and the
    output head), so generation quality is preserved.
    """
    # Create a copy of the model and tie the weights, then check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
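

# --- Hedged usage sketch: the non-quantized fallback path of
# `set_module_quantized_tensor_to_device` simply moves/replaces a tensor. The
# module above imports torch only when bitsandbytes is available, so this
# sketch assumes both packages are installed; it is illustrative only.
if __name__ == "__main__":
    demo_lin = nn.Linear(4, 4)
    demo_weight = torch.zeros(4, 4)
    set_module_quantized_tensor_to_device(demo_lin, "weight", "cpu", value=demo_weight)
    assert torch.equal(demo_lin.weight.data, demo_weight)  # parameter was replaced in place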
| 295 |
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
    },
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}


class T5TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self.extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
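

# --- Hedged usage sketch (requires network access to fetch "t5-small"): shows
# the sentinel tokens produced by the `extra_ids` logic in `__init__` and the
# EOS appended by `build_inputs_with_special_tokens`. Illustrative only.
if __name__ == "__main__":
    tok = T5TokenizerFast.from_pretrained("t5-small")
    # get_sentinel_tokens builds from a set, so sort before printing
    print(sorted(tok.get_sentinel_tokens())[:2])  # ['<extra_id_0>', '<extra_id_1>']
    print(tok("translate English to German: hello").input_ids[-1] == tok.eos_token_id)  # True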
| 295 | 1 |
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """A CLIPTokenizer that can expand one placeholder token into several learned tokens."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
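

# --- Hedged usage sketch: the class above expands one placeholder into several
# learned tokens (multi-vector textual inversion). "<cat-toy>" and the
# checkpoint name are illustrative assumptions; loading requires network access.
if __name__ == "__main__":
    tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=3)
    print(tokenizer.replace_placeholder_tokens_in_text("a photo of <cat-toy>"))
    # -> "a photo of <cat-toy>_0 <cat-toy>_1 <cat-toy>_2"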
| 260 |
from math import ceil
def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
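

# --- Usage sketch: distributing 12 transformer blocks over 3 devices. This is
# pure Python, so it runs as-is.
if __name__ == "__main__":
    device_map = get_device_map(n_layers=12, devices=[0, 1, 2])
    print(device_map)  # {0: [0, 1, 2, 3], 1: [4, 5, 6, 7], 2: [8, 9, 10, 11]}
    assert_device_map(device_map, num_blocks=12)  # raises nothing for a valid map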
| 260 | 1 |