Dataset schema:

| column | type | range |
|---|---|---|
| code | string | 86 – 54.5k chars |
| code_codestyle | int64 | 0 – 371 |
| style_context | string | 87 – 49.2k chars |
| style_context_codestyle | int64 | 0 – 349 |
| label | int64 | 0 – 1 |
import argparse
import re

from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints

from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging

logging.set_verbosity_info()

# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
    "/attention/": "/0/SelfAttention/",
    "/self_attention/": "/0/SelfAttention/",
    "/encoder_decoder_attention/": "/1/EncDecAttention/",
    "value": "v",
    "query": "q",
    "key": "k",
    "out": "o",
    "pre_self_attention_layer_norm": "0/layer_norm",
    "pre_cross_attention_layer_norm": "1/layer_norm",
    "pre_attention_layer_norm": "0/layer_norm",  # previously 1, but seems wrong
    "token_embedder": "shared",
    "encoder_norm": "final_layer_norm",
    "decoder_norm": "final_layer_norm",
    "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
    "router/router_weights/w/": "router/classifier/",
    "roer/roer_weights/w/": "router/classifier/",
    "logits_dense": "lm_head",
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_pattern = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_pattern, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        encoder_decoder_pattern = r"(encoder|decoder)\/"
        if re.match(encoder_decoder_pattern, key):
            groups = re.match(encoder_decoder_pattern, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)

    return s_dict
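# Illustrative sketch (not part of the original script): what rename_keys does to
# one flattened t5x key. The toy array stands in for a real checkpoint tensor and
# the key is hypothetical but follows the t5x naming scheme.
#
#   import numpy as np
#   toy = {"encoder/layers_0/attention/key/kernel": np.zeros((4, 4))}
#   rename_keys(toy)
#   # prints: encoder/layers_0/attention/key/kernel
#   #         -> encoder/block/0/layer/0/SelfAttention/k/kernel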
GIN_TO_CONFIG_MAPPING = {
    "NUM_ENCODER_LAYERS": "num_layers",
    "NUM_DECODER_LAYERS": "num_decoder_layers",
    "NUM_HEADS": "num_heads",
    "HEAD_DIM": "d_kv",
    "EMBED_DIM": "d_model",
    "MLP_DIM": "d_ff",
    "NUM_SELECTED_EXPERTS": "num_selected_experts",
    "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
    "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
    "dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a Google-style gin config to the Hugging Face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
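# Sketch of the gin lines convert_gin_to_config expects (values are hypothetical):
#
#   NUM_ENCODER_LAYERS = 12
#   NUM_HEADS = 12
#   EMBED_DIM = 768
#   dense.MlpBlock.activations = ('gelu',)
#
# which would produce SwitchTransformersConfig(num_layers=12, num_heads=12,
# d_model=768, feed_forward_proj="gelu", num_experts=num_experts).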
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the T5X checkpoint of the pre-trained SwitchTransformers model. \nIf no `config_name` is"
            " provided, a `gin_file` has to be provided."
        ),
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_name` has to be passed.",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of the SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
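# Example invocation (paths are placeholders):
#
#   python convert_switch_transformers_original_flax_checkpoint_to_pytorch.py \
#       --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --gin_file /path/to/operative_config.gin \
#       --pytorch_dump_folder_path ./switch-base-8 \
#       --num_experts 8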
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : Union[str, Any] ):
"""simple docstring"""
stooge(snake_case__ , 0 , len(snake_case__ ) - 1 )
return arr
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : int ):
"""simple docstring"""
if i >= h:
return
# If first element is smaller than the last then swap them
if arr[i] > arr[h]:
_snake_case , _snake_case : Tuple = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
_snake_case : Dict = (int)((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(snake_case__ , snake_case__ , (h - t) )
# Recursively sort last 2/3 elements
stooge(snake_case__ , i + t , (snake_case__) )
# Recursively sort first 2/3 elements
stooge(snake_case__ , snake_case__ , (h - t) )
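# Illustrative note (not in the original): stooge_sort([2, 4, 5, 3, 1]) returns
# [1, 2, 3, 4, 5]. Sorting the first 2/3, the last 2/3, then the first 2/3 again
# gives the characteristic O(n^(log 3 / log 1.5)) ≈ O(n^2.71) running time.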
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"


class FNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=True, unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
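    # Sketch (token ids are hypothetical): for a single sequence [5, 6] the two
    # methods above yield
    #   input_ids      = [cls, 5, 6, sep]           token_type_ids = [0, 0, 0, 0]
    # and for a pair ([5, 6], [7]) they yield
    #   input_ids      = [cls, 5, 6, sep, 7, sep]   token_type_ids = [0, 0, 0, 0, 1, 1]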
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent=None):
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
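# Example of a resulting user-agent string (version numbers and hash are hypothetical):
#
#   "diffusers/0.14.0; python/3.10.6; session_id/4f6d...; torch/2.0.0"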
def get_full_repo_name(model_id, organization=None, token=None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH, model_name=model_name, repo_name=repo_name, dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None, learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None, adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None, adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None, adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None, lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None, lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None, ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None, ema_power=args.ema_power if hasattr(args, "ema_power") else None, ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None, mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file, commit_hash=None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
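# Sketch (hypothetical cache path): a resolved file such as
#   ~/.cache/huggingface/diffusers/models--foo--bar/snapshots/0123abcd.../unet/config.json
# yields "0123abcd..." as the commit hash, provided it matches REGEX_COMMIT_HASH.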
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir=None, new_cache_dir=None):
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
                "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
                "message and we will do our best to help."
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, "w") as f:
            f.write("1")
    except Exception:
        logger.warning(
            f"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
            "the directory exists and can be written to."
        )
def _add_variant(weights_name, variant=None):
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name
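# For example, _add_variant("diffusion_pytorch_model.bin", "fp16") returns
# "diffusion_pytorch_model.fp16.bin"; with variant=None the name is unchanged.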
def _get_model_file(
    pretrained_model_name_or_path, *,
    weights_name, subfolder, cache_dir, force_download, proxies, resume_download, local_files_only, use_auth_token, user_agent, revision, commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.", FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.", FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash,
            )
            return model_file
        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
def naive_pattern_search(s, pattern):
    """Return every index in ``s`` where ``pattern`` starts."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
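# The search is O(n * m) comparisons in the worst case, e.g. s = "AAAA...A"
# with pattern "AAB", where almost every alignment is checked to its last character.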
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import UperNetForSemanticSegmentation
    from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", type_sequence_label_size=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features,
        )
    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, num_labels=self.num_labels,
        )
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"""input_ids""": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="moussaKam/mbarthez", revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6", sequences=sequences,
        )
def miller_rabin(n, allow_probable=False):
    """Deterministic Miller-Rabin test for n < 3.32e24; probabilistic above if allowed."""
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
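    # e.g. n = 221 gives n - 1 = 220 = 55 * 2**2, so d = 55 and s = 2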
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True
def test_miller_rabin():
    assert not miller_rabin(561)
    assert miller_rabin(563)
    # 2047

    assert not miller_rabin(83_8201)
    assert miller_rabin(83_8207)
    # 1_373_653

    assert not miller_rabin(1731_6001)
    assert miller_rabin(1731_6017)
    # 25_326_001

    assert not miller_rabin(30_7838_6641)
    assert miller_rabin(30_7838_6653)
    # 3_215_031_751

    assert not miller_rabin(1_7130_4557_4801)
    assert miller_rabin(1_7130_4557_4819)
    # 2_152_302_898_747

    assert not miller_rabin(2_7797_9972_8307)
    assert miller_rabin(2_7797_9972_8327)
    # 3_474_749_660_383

    assert not miller_rabin(113_8500_2390_9441)
    assert miller_rabin(113_8500_2390_9527)
    # 341_550_071_728_321

    assert not miller_rabin(127_5041_0188_4880_4351)
    assert miller_rabin(127_5041_0188_4880_4391)
    # 3_825_123_056_546_413_051

    assert not miller_rabin(796_6646_4458_5077_8779_1867)
    assert miller_rabin(796_6646_4458_5077_8779_1951)
    # 318_665_857_834_031_151_167_461

    assert not miller_rabin(5528_4067_7446_6478_9766_0333)
    assert miller_rabin(5528_4067_7446_6478_9766_0359)
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test


if __name__ == "__main__":
    test_miller_rabin()
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(self, prediction_length=None, context_length=None, distribution_output="student_t", loss="nll", input_size=1, lags_sequence=[1, 2, 3, 4, 5, 6, 7], scaling=True, num_dynamic_real_features=0, num_static_categorical_features=0, num_static_real_features=0, num_time_features=0, cardinality=None, embedding_dimension=None, d_model=64, encoder_attention_heads=2, decoder_attention_heads=2, encoder_layers=2, decoder_layers=2, encoder_ffn_dim=32, decoder_ffn_dim=32, activation_function="gelu", dropout=0.1, encoder_layerdrop=0.1, decoder_layerdrop=0.1, attention_dropout=0.1, activation_dropout=0.1, num_parallel_samples=100, init_std=0.02, use_cache=True, is_encoder_decoder=True, label_length=10, moving_average=25, autocorrelation_factor=3, **kwargs):
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
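# Usage sketch (hypothetical values): a minimal Autoformer configuration for a
# univariate series with a 24-step forecast horizon.
#
#   config = AutoformerConfig(prediction_length=24, context_length=48)
#   config.d_model  # 64 (default from the signature above)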
from __future__ import annotations
Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check that ``n`` does not already appear in the row, column, or 3x3 box."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True
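# For example, with initial_grid above, is_safe(initial_grid, 0, 1, 5) is False
# (5 already appears in column 1), while is_safe(initial_grid, 0, 1, 1) is True.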
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Find the coordinates of the next empty (zero) cell, if any."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
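# Usage sketch: with _LazyModule in place, `from transformers.models.speech_to_text
# import Speech2TextConfig` defers the heavy submodule import until the attribute
# is first accessed.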
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear", metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"}
    )
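# Usage sketch (hypothetical values):
#
#   args = Seq2SeqTrainingArguments(output_dir="out", label_smoothing=0.1,
#                                   sortish_sampler=True, predict_with_generate=True)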
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
    from .utils import rich
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
"""simple docstring"""
def A_ ( A__ ) -> Tuple:
a__ : Tuple = hex_num.strip()
if not hex_num:
raise ValueError('No value was passed to the function' )
a__ : Optional[Any] = hex_num[0] == '''-'''
if is_negative:
a__ : Any = hex_num[1:]
try:
a__ : Optional[int] = int(A__ , 16 )
except ValueError:
raise ValueError('Invalid value was passed to the function' )
a__ : Any = ''''''
while int_num > 0:
a__ : str = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('-' + bin_str) if is_negative else bin_str )
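# For example, hex_to_bin("AC") returns 10101100 (0xAC == 0b10101100), and the
# sign is preserved, so hex_to_bin("-1") returns -1.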
if __name__ == "__main__":
    import doctest

    doctest.testmod()
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        tf_path = os.path.abspath(tf_checkpoint_path)
        config_path = os.path.abspath(transfo_xl_config_file)
        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")

        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)

        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
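# Example invocation (paths are placeholders):
#
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl-pt \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --transfo_xl_config_file /path/to/config.json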
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--tf_checkpoint_path",
        default="",
        type=str,
        help="An optional path to a TensorFlow checkpoint path to be converted.",
    )
    parser.add_argument(
        "--transfo_xl_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained Transformer-XL model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--transfo_xl_dataset_file",
        default="",
        type=str,
        help="An optional dataset file to be converted in a vocabulary.",
    )
    args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _UpperCamelCase ( A_ ):
'''simple docstring'''
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 2_5),)
    def get_scheduler_config( self , **kwargs ):
        config = {
            "num_train_timesteps": 10_00,
            "beta_start": 0.0_0_0_1,
            "beta_end": 0.0_2,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }
        config.update(**kwargs )
        return config
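    # Callers can override any default above, e.g. self.get_scheduler_config(solver_order=3)
    # returns the same dict with "solver_order" replaced by 3.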
    def check_over_configs( self , time_step=0 , **config ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output , new_output = sample, sample
            for t in range(time_step , time_step + scheduler.config.solver_order + 1 ):
                output = scheduler.step(residual , t , output , **kwargs ).prev_sample
                new_output = new_scheduler.step(residual , t , new_output , **kwargs ).prev_sample
                assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
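        # The 1e-5 tolerance treats the reloaded scheduler as numerically identical to the
        # original; any drift here would mean state was lost in save_config/from_pretrained.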
    def check_over_forward( self , time_step=0 , **forward_kwargs ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop( self , scheduler=None , **config ):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
    def snake_case ( self ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , "set_timesteps" ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , "set_timesteps" ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
    def snake_case ( self ):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config() )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        scheduler = DEISMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
    def snake_case ( self ):
        for timesteps in [25, 50, 1_00, 9_99, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def snake_case ( self ):
        self.check_over_configs(thresholding=False )
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , solver_order=order , solver_type=solver_type , )
    def snake_case ( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def snake_case ( self ):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order , solver_type=solver_type , prediction_type=prediction_type , )
                    sample = self.full_loop(
                        solver_order=order , solver_type=solver_type , prediction_type=prediction_type , )
                    assert not torch.isnan(sample ).any(), "Samples have nan numbers"
    def snake_case ( self ):
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
    def snake_case ( self ):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
    def snake_case ( self ):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
    def snake_case ( self ):
        sample = self.full_loop(prediction_type="v_prediction" )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.1_0_1_4 ) < 1e-3
    def snake_case ( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        assert sample.dtype == torch.float16
    def snake_case ( self , **kwargs ):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**kwargs )
            scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 57 |
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = 'tiny-wmt19-en-ru'
# Build
# borrowed from a test
vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
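# Each merge rule is '<left> <right> <count>' (fairseq-style BPE codes); e.g. 'l o 123'
# merges the symbol pair ('l', 'o') into 'lo'. The trailing empty string only makes the
# written file end with a newline.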
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES['src_vocab_file']
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file']
    merges_file = build_dir / VOCAB_FILES_NAMES['merges_file']
with open(src_vocab_file, 'w') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, 'w') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, 'w') as fp:
fp.write('\n'.join(merges))
    tokenizer = FSMTTokenizer(
langs=['en', 'ru'],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['ru', 'en'],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
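# The dimensions are deliberately minimal (d_model=4, one layer and one head per side):
# the model only has to exercise the code paths in tests, not translate, which is what
# keeps the saved files tiny.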
tiny_model = FSMTForConditionalGeneration(config)
print(F"num of params {tiny_model.num_parameters()}")
# Test
batch = tokenizer(['Making tiny model'], return_tensors='pt')
outputs = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 288 | 0 |
"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class A_ (TokenizerTesterMixin ,unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = "[PAD]"
UpperCAmelCase_ : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "[PAD]" )
        self.assertEqual(vocab_keys[1] , "[CLS]" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(vocab_keys ) , 1012 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
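        # The expected ids are raw sentencepiece ids shifted by tokenizer.fairseq_offset,
        # which reserves the first vocabulary slots for fairseq-style special tokens.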
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] , )
@cached_property
    def big_tokenizer( self ):
"""simple docstring"""
return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = "Hello World!"
UpperCAmelCase_ : Optional[Any] = [3_5389, 6672, 49, 2]
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
# fmt: off
UpperCAmelCase_ : Union[str, Any] = {"input_ids": [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
| 23 |
"""simple docstring"""
import datasets
_a = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
_a = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
_a = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'
def simple_accuracy( preds, labels ):
    return (preds == labels).mean()
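# Quick sanity check (assuming numpy arrays, which the metric's "numpy" format provides):
#   simple_accuracy(np.array([0, 1, 1]), np.array([0, 1, 0])) -> 0.666...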
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
'''simple docstring'''
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
    def _compute( self , predictions , references ):
        """simple docstring"""
        return {"accuracy": simple_accuracy(predictions , references )}
| 23 | 1 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate( model_type ,generator_name_or_path ,question_encoder_name_or_path ,dest_dir ,config_name_or_path = None ,generator_tokenizer_name_or_path = None ,question_encoder_tokenizer_name_or_path = None ,):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path )
    gen_config = AutoConfig.from_pretrained(generator_name_or_path )
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path )
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path ,generator_name_or_path ,config=rag_config )
    rag_model.save_pretrained(dest_dir )
    # Sanity check.
    model_class.from_pretrained(dest_dir )
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path )
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path )
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
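# Minimal usage sketch (hypothetical checkpoint choice; any seq2seq generator plus a
# DPR question encoder should work):
#
#   consolidate(
#       'rag_sequence',
#       'facebook/bart-large',
#       'facebook/dpr-question_encoder-single-nq-base',
#       Path('./rag-consolidated'),
#   )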
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Optional[Any] = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 48 |
from math import sqrt
def solution( limit: int = 100_0000 ) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size ,sum_shortest_sides // 2 )
                    - max(1 ,sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
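# Counting note for the increment above: for longest side M = max_cuboid_size and
# a + b = sum_shortest_sides, the shortest surface path is sqrt((a + b)**2 + M**2);
# when it is integral, the number of valid pairs 1 <= a <= b <= M equals
# min(M, (a + b) // 2) - max(1, a + b - M) + 1.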
if __name__ == "__main__":
print(f'''{solution() = }''')
| 48 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    '''configuration_nllb_moe''': [
        '''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''NllbMoeConfig''',
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_nllb_moe'''] = [
        '''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''NllbMoeForConditionalGeneration''',
        '''NllbMoeModel''',
        '''NllbMoePreTrainedModel''',
        '''NllbMoeTop2Router''',
        '''NllbMoeSparseMLP''',
    ]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
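    # Replacing sys.modules[__name__] with a _LazyModule defers the heavy torch imports:
    # entries in _import_structure are only imported on first attribute access.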
| 358 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case : List[str] = logging.get_logger(__name__)
snake_case : Optional[Any] = {
'''vocab_file''': '''vocab.txt''',
'''merges_file''': '''bpe.codes''',
}
snake_case : str = {
'''vocab_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
},
'''merges_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
},
}
snake_case : List[Any] = {
'''vinai/phobert-base''': 2_56,
'''vinai/phobert-large''': 2_56,
}
def get_pairs( word ):
    """
    Return the set of symbol pairs in a word, where the word is represented as a
    tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
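# e.g. get_pairs(('l', 'o', 'w')) -> {('l', 'o'), ('o', 'w')}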
class _snake_case ( PreTrainedTokenizer ):
SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , merges_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , **kwargs , ):
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3
        self.add_from_file(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding='''utf-8''' ) as merges_handle:
            merges = merges_handle.read().split('''\n''' )[:-1]
        merges = [tuple(merge.split()[:-1] ) for merge in merges]
        self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
        self.cache = {}
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a :Union[str, Any] = [self.cls_token_id]
a :Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCamelCase )) + [1]
return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1]
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None ):
a :Optional[int] = [self.sep_token_id]
a :Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return len(self.encoder )
def SCREAMING_SNAKE_CASE__ ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        word = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = '''@@ '''.join(word )
        word = word[:-4]
        self.cache[token] = word
        return word
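    # PhoBERT marks non-final subword pieces with a trailing "@@", e.g. a three-piece
    # split comes back as "un@@ aff@@ able"-style output (illustrative only; actual
    # splits depend on the learned merges), and word[:-4] strips the final "</w>" marker.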
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
a :Union[str, Any] = []
a :str = re.findall(R'''\S+\n?''' , _lowerCamelCase )
for token in words:
split_tokens.extend(list(self.bpe(_lowerCamelCase ).split(''' ''' ) ) )
return split_tokens
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
return self.encoder.get(_lowerCamelCase , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
return self.decoder.get(_lowerCamelCase , self.unk_token )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
a :Optional[int] = ''' '''.join(_lowerCamelCase ).replace('''@@ ''' , '''''' ).strip()
return out_string
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None ):
if not os.path.isdir(_lowerCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
a :Tuple = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
a :Optional[int] = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ):
copyfile(self.vocab_file , _lowerCamelCase )
if os.path.abspath(self.merges_file ) != os.path.abspath(_lowerCamelCase ):
copyfile(self.merges_file , _lowerCamelCase )
return out_vocab_file, out_merge_file
    def add_from_file( self , f ):
        if isinstance(f , str ):
            try:
                with open(f , '''r''' , encoding='''utf-8''' ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(F'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(''' ''' )
            if idx == -1:
                raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' )
            word = line[:idx]
            self.encoder[word] = len(self.encoder )
| 281 | 0 |
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('''KT''')
lowerCAmelCase__ = TypeVar('''VT''')
class lowercase_ (Generic[KT, VT] ):
"""simple docstring"""
def __init__( self : Optional[Any] ,lowercase__ : KT | str = "root" ,lowercase__ : VT | None = None ):
        self.key = key
        self.value = value
        self.forward = []
def __repr__( self : Tuple ):
return F"Node({self.key}: {self.value})"
@property
    def level( self ):
return len(self.forward )
class lowercase_ (Generic[KT, VT] ):
"""simple docstring"""
def __init__( self : int ,lowercase__ : float = 0.5 ,lowercase__ : int = 1_6 ):
        self.head = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level
    def __str__( self ):
        items = list(self )
        if len(items ) == 0:
            return F"SkipList(level={self.level})"
        label_size = max((len(str(item ) ) for item in items) ,default=4 )
        label_size = max(label_size ,4 ) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(F"[{node.key}]".ljust(label_size ,'''-''' ) + '''* ''' * len(forwards ) )
        lines.append(''' ''' * label_size + '''| ''' * len(forwards ) )
        while len(node.forward ) != 0:
            node = node.forward[0]
            lines.append(
                F"[{node.key}]".ljust(label_size ,'''-''' )
                + ''' '''.join(str(n.key ) if n.key == node.key else '''|''' for n in forwards ) )
            lines.append(''' ''' * label_size + '''| ''' * len(forwards ) )
            forwards = node.forward
        lines.append('''None'''.ljust(label_size ) + '''* ''' * len(forwards ) )
        return F"SkipList(level={self.level})\n" + "\n".join(lines )
def __iter__( self : List[str] ):
        node = self.head
        while len(node.forward ) != 0:
            yield node.forward[0].key
            node = node.forward[0]
    def random_level( self ):
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
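    # The level is geometrically distributed: each additional level appears with
    # probability p (capped at max_level), which keeps expected tower heights constant
    # and the overall search cost O(log n).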
    def _locate_node( self ,key ):
        update_vector = []
        node = self.head
        for i in reversed(range(self.level ) ):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node )
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward ) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector
    def delete( self ,key: KT ):
        node , update_vector = self._locate_node(key )
        if node is not None:
            for i, update_node in enumerate(update_vector ):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]
    def insert( self ,key: KT ,value: VT ):
        node , update_vector = self._locate_node(key )
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1 ,level ):
                    update_vector.append(self.head )
                self.level = level
            new_node = Node(key ,value )
            for i, update_node in enumerate(update_vector[:level] ):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i] )
                if update_node.level < i + 1:
                    update_node.forward.append(new_node )
                else:
                    update_node.forward[i] = new_node
    def find( self ,key ):
        node , _ = self._locate_node(key )
        if node is not None:
            return node.value
        return None
def test_insert():
    """simple docstring"""
    skip_list = SkipList()
skip_list.insert('''Key1''' , 3 )
skip_list.insert('''Key2''' , 12 )
skip_list.insert('''Key3''' , 41 )
skip_list.insert('''Key4''' , -19 )
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def test_insert_overrides_existing_value():
    """simple docstring"""
    skip_list = SkipList()
skip_list.insert('''Key1''' , 10 )
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''Key5''' , 7 )
skip_list.insert('''Key7''' , 10 )
skip_list.insert('''Key10''' , 5 )
skip_list.insert('''Key7''' , 7 )
skip_list.insert('''Key5''' , 5 )
skip_list.insert('''Key10''' , 10 )
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    if len(all_values ) != 4:
        print()
    assert len(all_values ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none():
    """simple docstring"""
    skip_list = SkipList()
assert skip_list.find('''Some key''' ) is None
def test_search():
    """simple docstring"""
    skip_list = SkipList()
skip_list.insert('''Key2''' , 20 )
assert skip_list.find('''Key2''' ) == 20
skip_list.insert('''Some Key''' , 10 )
skip_list.insert('''Key2''' , 8 )
skip_list.insert('''V''' , 13 )
assert skip_list.find('''Y''' ) is None
assert skip_list.find('''Key2''' ) == 8
assert skip_list.find('''Some Key''' ) == 10
assert skip_list.find('''V''' ) == 13
def test_deleting_item_from_empty_list_do_nothing():
    """simple docstring"""
    skip_list = SkipList()
skip_list.delete('''Some key''' )
assert len(skip_list.head.forward ) == 0
def test_deleted_items_are_not_founded_by_find_method():
    """simple docstring"""
    skip_list = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 14 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''V''' )
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''Key2''' ) is None
def test_delete_removes_only_given_key():
    """simple docstring"""
    skip_list = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 14 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''V''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) == 14
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''X''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key1''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) is None
def test_delete_doesnt_leave_dead_nodes():
    """simple docstring"""
    skip_list = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 142 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''X''' )
    def traverse_keys(node ):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def test_iter_always_yields_sorted_values():
    """simple docstring"""
    def is_sorted(lst ):
        return all(next_item >= item for item, next_item in zip(lst , lst[1:] ) )
    skip_list = SkipList()
    for i in range(10 ):
        skip_list.insert(i , i )
    assert is_sorted(list(skip_list ) )
    skip_list.delete(5 )
    skip_list.delete(8 )
    skip_list.delete(2 )
    assert is_sorted(list(skip_list ) )
    skip_list.insert(-12 , -12 )
    skip_list.insert(77 , 77 )
    assert is_sorted(list(skip_list ) )
def pytests():
"""simple docstring"""
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def main():
    """simple docstring"""
    skip_list = SkipList()
skip_list.insert(2 , '''2''' )
skip_list.insert(4 , '''4''' )
skip_list.insert(6 , '''4''' )
skip_list.insert(4 , '''5''' )
skip_list.insert(8 , '''4''' )
skip_list.insert(9 , '''4''' )
skip_list.delete(4 )
    print(skip_list )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 104 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class lowercase_ (BackboneConfigMixin , PretrainedConfig ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = 'convnextv2'
    def __init__( self ,num_channels=3 ,patch_size=4 ,num_stages=4 ,hidden_sizes=None ,depths=None ,hidden_act="gelu" ,initializer_range=0.0_2 ,layer_norm_eps=1e-1_2 ,drop_path_rate=0.0 ,image_size=2_2_4 ,out_features=None ,out_indices=None ,**kwargs ,):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [9_6, 1_9_2, 3_8_4, 7_6_8] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ['''stem'''] + [F"stage{idx}" for idx in range(1 ,len(self.depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features ,out_indices=out_indices ,stage_names=self.stage_names )
| 104 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class __snake_case ( TaskTemplate ):
"""simple docstring"""
_lowerCamelCase = field(default="""summarization""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
_lowerCamelCase = Features({"""text""": Value("""string""" )} )
_lowerCamelCase = Features({"""summary""": Value("""string""" )} )
_lowerCamelCase = "text"
_lowerCamelCase = "summary"
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return {self.text_column: "text", self.summary_column: "summary"}
| 360 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp( self ):
'''simple docstring'''
super().setUp()
# fmt: off
__A : int = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
__A : Dict = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
__A : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowerCamelCase ) + '''\n''' )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        input_text = '''tester'''
        output_text = '''tester'''
        return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def UpperCamelCase__( self ):
'''simple docstring'''
pass
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[Any] = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__A : Union[str, Any] = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
__A : Optional[Any] = tokenizer.encode([special_token] , add_special_tokens=__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , 1 )
__A : List[Any] = tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
self.assertTrue(special_token not in decoded )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Tuple = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__A , __A : str = self.get_input_output_texts(__lowerCamelCase )
__A : Union[str, Any] = tokenizer.tokenize(__lowerCamelCase )
__A : Union[str, Any] = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
__A : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
__A : Optional[Any] = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertNotEqual(len(__lowerCamelCase ) , 0 )
__A : Union[str, Any] = tokenizer.decode(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , __lowerCamelCase )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def UpperCamelCase__( self ):
'''simple docstring'''
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def UpperCamelCase__( self ):
'''simple docstring'''
pass
| 291 | 0 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class UpperCamelCase ( unittest.TestCase ):
    def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True, ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
snake_case_ = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = num_channels
snake_case_ = min_resolution
snake_case_ = max_resolution
snake_case_ = do_resize
snake_case_ = size
snake_case_ = do_normalize
snake_case_ = image_mean
snake_case_ = image_std
snake_case_ = do_rescale
snake_case_ = rescale_factor
snake_case_ = do_pad
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self, image_inputs, batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class UpperCamelCase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = YolosImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = YolosImageProcessingTester(self)
@property
    def image_processor_dict( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def a_ ( self) -> Dict:
snake_case_ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowerCAmelCase__, 'image_mean'))
self.assertTrue(hasattr(lowerCAmelCase__, 'image_std'))
self.assertTrue(hasattr(lowerCAmelCase__, 'do_normalize'))
self.assertTrue(hasattr(lowerCAmelCase__, 'do_resize'))
self.assertTrue(hasattr(lowerCAmelCase__, 'size'))
def a_ ( self) -> int:
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
self.assertEqual(image_processor.do_pad, lowerCAmelCase__)
snake_case_ = self.image_processing_class.from_dict(
self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=lowerCAmelCase__)
self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84})
self.assertEqual(image_processor.do_pad, lowerCAmelCase__)
def a_ ( self) -> List[str]:
pass
def a_ ( self) -> Union[str, Any]:
# Initialize image_processing
snake_case_ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
snake_case_ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase__)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__, Image.Image)
# Test not batched input
snake_case_ = image_processing(image_inputs[0], return_tensors='pt').pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(lowerCAmelCase__)
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(lowerCAmelCase__, batched=lowerCAmelCase__)
snake_case_ = image_processing(lowerCAmelCase__, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def a_ ( self) -> int:
# Initialize image_processing
snake_case_ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase__, numpify=lowerCAmelCase__)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__, np.ndarray)
# Test not batched input
snake_case_ = image_processing(image_inputs[0], return_tensors='pt').pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(lowerCAmelCase__)
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
snake_case_ = image_processing(lowerCAmelCase__, return_tensors='pt').pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(lowerCAmelCase__, batched=lowerCAmelCase__)
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def a_ ( self) -> Union[str, Any]:
# Initialize image_processing
snake_case_ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase__, torchify=lowerCAmelCase__)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__, torch.Tensor)
# Test not batched input
snake_case_ = image_processing(image_inputs[0], return_tensors='pt').pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(lowerCAmelCase__)
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
snake_case_ = image_processing(lowerCAmelCase__, return_tensors='pt').pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(lowerCAmelCase__, batched=lowerCAmelCase__)
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def a_ ( self) -> Optional[int]:
# Initialize image_processings
snake_case_ = self.image_processing_class(**self.image_processor_dict)
snake_case_ = self.image_processing_class(do_resize=lowerCAmelCase__, do_normalize=lowerCAmelCase__, do_rescale=lowerCAmelCase__)
# create random PyTorch tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase__, torchify=lowerCAmelCase__)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__, torch.Tensor)
# Test whether the method "pad" and calling the image processor return the same tensors
snake_case_ = image_processing_a.pad(lowerCAmelCase__, return_tensors='pt')
snake_case_ = image_processing_a(lowerCAmelCase__, return_tensors='pt')
self.assertTrue(
torch.allclose(encoded_images_with_method['pixel_values'], encoded_images['pixel_values'], atol=1e-4))
@slow
def a_ ( self) -> Union[str, Any]:
# prepare image and target
snake_case_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r') as f:
snake_case_ = json.loads(f.read())
snake_case_ = {'image_id': 3_9769, 'annotations': target}
# encode them
snake_case_ = YolosImageProcessor.from_pretrained('hustvl/yolos-small')
snake_case_ = image_processing(images=lowerCAmelCase__, annotations=lowerCAmelCase__, return_tensors='pt')
# verify pixel values
snake_case_ = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding['pixel_values'].shape, lowerCAmelCase__)
snake_case_ = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], lowerCAmelCase__, atol=1e-4))
# verify area
snake_case_ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
self.assertTrue(torch.allclose(encoding['labels'][0]['area'], lowerCAmelCase__))
# verify boxes
snake_case_ = torch.Size([6, 4])
self.assertEqual(encoding['labels'][0]['boxes'].shape, lowerCAmelCase__)
snake_case_ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], lowerCAmelCase__, atol=1e-3))
# verify image_id
snake_case_ = torch.tensor([3_9769])
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], lowerCAmelCase__))
# verify is_crowd
snake_case_ = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], lowerCAmelCase__))
# verify class_labels
snake_case_ = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], lowerCAmelCase__))
# verify orig_size
snake_case_ = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], lowerCAmelCase__))
# verify size
snake_case_ = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding['labels'][0]['size'], lowerCAmelCase__))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
        # encode them
        image_processing = YolosImageProcessor(format='coco_panoptic')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
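
# A minimal, self-contained sketch (not part of the tests above) of the box
# convention the assertions encode: COCO annotations carry absolute
# [x_min, y_min, width, height] boxes, while labels[0]['boxes'] holds
# normalized [center_x, center_y, width, height]. The function name is
# illustrative, not a processor internal.
def coco_box_to_normalized_center_format(box, image_width, image_height):
    x_min, y_min, box_w, box_h = box
    center_x = (x_min + box_w / 2) / image_width
    center_y = (y_min + box_h / 2) / image_height
    return [center_x, center_y, box_w / image_width, box_h / image_height]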
| 69 |
"""simple docstring"""
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__( self , img , dst_width , dst_height ):
        if dst_width < 0 or dst_height < 0:
            raise ValueError('''Destination width/height should be > 0''' )

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = (
            np.ones((self.dst_h, self.dst_w, 3) , np.uint8 ) * 255
        )
    def process( self ):
        # Map every destination pixel back to its nearest source pixel.
        for i in range(self.dst_h ):
            for j in range(self.dst_w ):
                self.output[i][j] = self.img[self.get_y(i )][self.get_x(j )]

    def get_x( self , x ):
        return int(self.ratio_x * x )

    def get_y( self , y ):
        return int(self.ratio_y * y )
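
# A minimal vectorized sketch of the same nearest-neighbour mapping using NumPy
# fancy indexing; equivalent in spirit to the Python loops in process() above.
# The standalone function and its name are illustrative assumptions, not part
# of the original class.
def resize_nearest_neighbour(img, dst_width, dst_height):
    ys = (np.arange(dst_height) * img.shape[0] / dst_height).astype(int)
    xs = (np.arange(dst_width) * img.shape[1] / dst_width).astype(int)
    return img[ys[:, None], xs]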
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread('''image_data/lena.jpg''', 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}', n.output
)
waitKey(0)
    destroyAllWindows()
| 160 | 0 |
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Return the two strings interleaved character by character."""
    first_str_length: int = len(first_str)
    second_str_length: int = len(second_str)
    output_list_length: int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(output_list_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
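    # Worked example: "AB" and "XYZ" interleave as A, X, B, Y, Z, so the line
    # above prints "AXBYZ" followed by the trailing space from end=" ".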
| 354 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_bridgetower""": [
"""BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BridgeTowerConfig""",
"""BridgeTowerTextConfig""",
"""BridgeTowerVisionConfig""",
],
"""processing_bridgetower""": ["""BridgeTowerProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""BridgeTowerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
"""BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BridgeTowerForContrastiveLearning""",
"""BridgeTowerForImageAndTextRetrieval""",
"""BridgeTowerForMaskedLM""",
"""BridgeTowerModel""",
"""BridgeTowerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
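
# For illustration only: _LazyModule defers the heavy torch/vision imports
# above until first attribute access. A minimal standalone sketch of the same
# idea via module-level __getattr__ (PEP 562); "heavy_submodule" is a
# placeholder name, not a real module.
#
#     import importlib
#
#     def __getattr__(name):
#         if name == "heavy_submodule":
#             return importlib.import_module("." + name, __name__)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")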
| 192 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}


class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self , vocab_size=128112 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.05 , decoder_layerdrop=0.05 , use_cache=True , is_encoder_decoder=True , activation_function="relu" , d_model=1024 , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , router_bias=False , router_dtype="float32" , router_ignore_padding_tokens=False , num_experts=128 , expert_capacity=64 , encoder_sparse_step=4 , decoder_sparse_step=4 , router_z_loss_coef=0.001 , router_aux_loss_coef=0.001 , second_expert_policy="all" , normalize_router_prob_before_dropping=False , batch_prioritized_routing=False , moe_eval_capacity_token_fraction=1.0 , moe_token_dropout=0.2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , output_router_logits=False , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
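
# A hedged usage sketch (comments only): the config is a plain keyword-argument
# container, so overriding a couple of MoE fields looks like this, with values
# purely illustrative:
#
#     config = NllbMoeConfig(num_experts=8, expert_capacity=32)
#     config.num_experts  # -> 8
#     config.d_model      # -> 1024 (default above)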
| 252 |
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch( gpt2_checkpoint_path , gpt2_config_file , pytorch_dump_folder_path ):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f'Save PyTorch model to {pytorch_weights_dump_path}')
    torch.save(model.state_dict() , pytorch_weights_dump_path)
    print(f'Save configuration file to {pytorch_config_dump_path}')
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
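
# Hedged CLI example (comments only; all paths are placeholders, not real files):
#
#     python convert_gpt2_checkpoint.py \
#         --gpt2_checkpoint_path /tmp/gpt2/model.ckpt \
#         --pytorch_dump_folder_path /tmp/gpt2-pytorch \
#         --gpt2_config_file /tmp/gpt2/config.json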
| 252 | 1 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path , map_location='''cpu''' )
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0] , name_pair[1] )
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d['''cls.predictions.bias''']
    return new_d
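
# Tiny standalone demo (not from the script) of the renaming rule above: each
# (old, new) pair in rename_keys_prefix is applied as a plain substring
# replacement, e.g.
#     "bert.bert.encoder.layer.0.output.dense.weight"
#  -> "visual_bert.encoder.layer.0.output.dense.weight"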
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
assert (
checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS
), F"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
# Get Config
if "pre" in checkpoint_path:
_lowerCamelCase = '''pretraining'''
if "vcr" in checkpoint_path:
_lowerCamelCase = {'''visual_embedding_dim''': 5_12}
elif "vqa_advanced" in checkpoint_path:
_lowerCamelCase = {'''visual_embedding_dim''': 20_48}
elif "vqa" in checkpoint_path:
_lowerCamelCase = {'''visual_embedding_dim''': 20_48}
elif "nlvr" in checkpoint_path:
_lowerCamelCase = {'''visual_embedding_dim''': 10_24}
else:
raise NotImplementedError(F"""No implementation found for `{checkpoint_path}`.""" )
else:
if "vcr" in checkpoint_path:
_lowerCamelCase = {'''visual_embedding_dim''': 5_12}
_lowerCamelCase = '''multichoice'''
elif "vqa_advanced" in checkpoint_path:
_lowerCamelCase = {'''visual_embedding_dim''': 20_48}
_lowerCamelCase = '''vqa_advanced'''
elif "vqa" in checkpoint_path:
_lowerCamelCase = {'''visual_embedding_dim''': 20_48, '''num_labels''': 31_29}
_lowerCamelCase = '''vqa'''
elif "nlvr" in checkpoint_path:
_lowerCamelCase = {
'''visual_embedding_dim''': 10_24,
'''num_labels''': 2,
}
_lowerCamelCase = '''nlvr'''
_lowerCamelCase = VisualBertConfig(**lowercase_ )
# Load State Dict
_lowerCamelCase = load_state_dict(lowercase_ )
_lowerCamelCase = get_new_dict(lowercase_ , lowercase_ )
if model_type == "pretraining":
_lowerCamelCase = VisualBertForPreTraining(lowercase_ )
elif model_type == "vqa":
_lowerCamelCase = VisualBertForQuestionAnswering(lowercase_ )
elif model_type == "nlvr":
_lowerCamelCase = VisualBertForVisualReasoning(lowercase_ )
elif model_type == "multichoice":
_lowerCamelCase = VisualBertForMultipleChoice(lowercase_ )
model.load_state_dict(lowercase_ )
# Save Checkpoints
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
model.save_pretrained(lowercase_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 73 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    # The net moment about the origin must (approximately) vanish in equilibrium.
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
# Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 73 | 1 |
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' )
    commands_parser = parser.add_subparsers(help='diffusers-cli command helpers' )

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args , 'func' ):
        parser.print_help()
        exit(1 )

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 255 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
    freeze_encoder: bool = field(default=False, metadata={'help': 'Whether to freeze the encoder.'} )
    freeze_embeds: bool = field(default=False, metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
    task: Optional[str] = field(
        default='summarization', metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'}, )
    max_source_length: Optional[int] = field(
        default=1_024, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    max_target_length: Optional[int] = field(
        default=128, metadata={
            'help': (
                'The maximum total sequence length for target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    val_max_target_length: Optional[int] = field(
        default=142, metadata={
            'help': (
                'The maximum total sequence length for validation target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded. '
                'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
                'during ``evaluate`` and ``predict``.'
            )
        }, )
    test_max_target_length: Optional[int] = field(
        default=142, metadata={
            'help': (
                'The maximum total sequence length for test target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    n_train: Optional[int] = field(default=-1, metadata={'help': '# training examples. -1 means use all.'} )
    n_val: Optional[int] = field(default=-1, metadata={'help': '# validation examples. -1 means use all.'} )
    n_test: Optional[int] = field(default=-1, metadata={'help': '# test examples. -1 means use all.'} )
    src_lang: Optional[str] = field(default=None, metadata={'help': 'Source language id for translation.'} )
    tgt_lang: Optional[str] = field(default=None, metadata={'help': 'Target language id for translation.'} )
    eval_beams: Optional[int] = field(default=None, metadata={'help': '# num_beams to use for evaluation.'} )
    ignore_pad_token_for_loss: bool = field(
        default=True, metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'}, )
def handle_metrics(split, metrics, output_dir):
    logger.info(f'''***** {split} metrics *****''' )
    for key in sorted(metrics.keys() ):
        logger.info(f'''  {key} = {metrics[key]}''' )
    save_json(metrics , os.path.join(output_dir , f'''{split}_results.json''' ) )


def main():
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments) )

    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
check_output_dir(_UpperCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s' , _UpperCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    extra_model_params = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
    for p in extra_model_params:
        if getattr(training_args , p , None ):
            assert hasattr(config , p ), f'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
            setattr(config , p , getattr(training_args , p ) )

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf='.ckpt' in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )

    # use task specific params
    use_task_specific_params(model , data_args.task )

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer , MBartTokenizer ):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )

    if model_args.freeze_embeds:
        freeze_embeds(model )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )

    dataset_class = Seq2SeqDataset
    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer , type_path='train' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer , type_path='val' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer , type_path='test' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task , tokenizer ) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model , args=training_args , data_args=data_args , train_dataset=train_dataset , eval_dataset=eval_dataset , data_collator=Seq2SeqDataCollator(
            tokenizer , data_args , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=compute_metrics_fn , tokenizer=tokenizer , )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info('*** Train ***' )

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics['train_n_objs'] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics('train' , metrics , training_args.output_dir )
            all_metrics.update(metrics )

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )

        metrics = trainer.evaluate(metric_key_prefix='val' )
        metrics['val_n_objs'] = data_args.n_val
        metrics['val_loss'] = round(metrics['val_loss'] , 4 )

        if trainer.is_world_process_zero():
            handle_metrics('val' , metrics , training_args.output_dir )
            all_metrics.update(metrics )

    if training_args.do_predict:
        logger.info('*** Predict ***' )

        test_output = trainer.predict(test_dataset=test_dataset , metric_key_prefix='test' )
        metrics = test_output.metrics
        metrics['test_n_objs'] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics['test_loss'] = round(metrics['test_loss'] , 4 )
            handle_metrics('test' , metrics , training_args.output_dir )
            all_metrics.update(metrics )

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=True , clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip , test_preds )
                write_txt_file(test_preds , os.path.join(training_args.output_dir , 'test_generations.txt' ) )

    if trainer.is_world_process_zero():
        save_json(all_metrics , os.path.join(training_args.output_dir , 'all_results.json' ) )

    return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
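
# Hedged example invocation (comments only; model, data paths and flag values
# are placeholders, not tested settings):
#
#     python finetune_trainer.py \
#         --model_name_or_path sshleifer/student_marian_en_ro_6_1 \
#         --data_dir ./wmt_en_ro \
#         --output_dir ./marian_finetuned \
#         --do_train --do_eval --predict_with_generate \
#         --max_source_length 128 --max_target_length 128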
| 255 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'caidas/swin2sr-classicalsr-x2-64': (
        'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self, image_size=64, patch_size=1, num_channels=3, embed_dim=180, depths=[6, 6, 6, 6, 6, 6], num_heads=[6, 6, 6, 6, 6, 6], window_size=8, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, upscale=2, img_range=1.0, resi_connection="1conv", upsampler="pixelshuffle", **kwargs, ):
        super().__init__(**kwargs )

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
| 194 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]

    if isinstance(image[0] , PIL.Image.Image ):
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    # Initialize so NumPy inputs skip the torch round-trip at the end.
    inputs_are_torch = False
    if not isinstance(v0 , np.ndarray ):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0 ) * np.linalg.norm(v1 )) )
    if np.abs(dot ) > DOT_THRESHOLD:
        # Vectors are nearly parallel: fall back to plain linear interpolation.
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot )
        sin_theta_0 = np.sin(theta_0 )
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t )
        s0 = np.sin(theta_0 - theta_t ) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2 ).to(input_device )

    return v2
def spherical_dist_loss(x, y):
    x = F.normalize(x , dim=-1 )
    y = F.normalize(y , dim=-1 )
    return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
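
# Worked example (comments only): for orthonormal x and y, ||x - y|| = sqrt(2),
# so the loss is (arcsin(sqrt(2)/2))**2 * 2 = (pi/4)**2 * 2 = pi**2/8 ~= 1.2337,
# i.e. the squared geodesic distance between the two unit vectors.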
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class A_ ( __UpperCamelCase ):
'''simple docstring'''
def __init__( self: Any , a: AutoencoderKL , a: CLIPTextModel , a: CLIPModel , a: CLIPTokenizer , a: UNetaDConditionModel , a: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , a: CLIPFeatureExtractor , a: Union[str, Any]=None , a: Union[str, Any]=None , a: Union[str, Any]=None , ):
super().__init__()
self.register_modules(
vae=a , text_encoder=a , clip_model=a , tokenizer=a , unet=a , scheduler=a , feature_extractor=a , coca_model=a , coca_tokenizer=a , coca_transform=a , )
__lowerCamelCase : Tuple = (
feature_extractor.size
if isinstance(feature_extractor.size , a )
else feature_extractor.size['shortest_edge']
)
__lowerCamelCase : List[Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , a )
set_requires_grad(self.clip_model , a )
def _snake_case ( self: Optional[Any] , a: Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__lowerCamelCase : Any = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(a )
def _snake_case ( self: Dict ):
self.enable_attention_slicing(a )
def _snake_case ( self: Optional[Any] ):
set_requires_grad(self.vae , a )
def _snake_case ( self: List[Any] ):
set_requires_grad(self.vae , a )
def _snake_case ( self: int ):
set_requires_grad(self.unet , a )
def _snake_case ( self: int ):
set_requires_grad(self.unet , a )
def _snake_case ( self: Optional[Any] , a: Union[str, Any] , a: List[str] , a: List[Any] ):
# get the original timestep using init_timestep
__lowerCamelCase : List[Any] = min(int(num_inference_steps * strength ) , a )
__lowerCamelCase : str = max(num_inference_steps - init_timestep , 0 )
__lowerCamelCase : List[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _snake_case ( self: Union[str, Any] , a: Optional[Any] , a: Any , a: Optional[int] , a: Optional[Any] , a: Union[str, Any] , a: List[str]=None ):
if not isinstance(a , torch.Tensor ):
raise ValueError(F'`image` has to be of type `torch.Tensor` but is {type(a )}' )
__lowerCamelCase : Union[str, Any] = image.to(device=a , dtype=a )
if isinstance(a , a ):
__lowerCamelCase : str = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a )
]
__lowerCamelCase : Tuple = torch.cat(a , dim=0 )
else:
__lowerCamelCase : List[Any] = self.vae.encode(a ).latent_dist.sample(a )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__lowerCamelCase : List[str] = 0.1_8_2_1_5 * init_latents
__lowerCamelCase : Union[str, Any] = init_latents.repeat_interleave(a , dim=0 )
__lowerCamelCase : Optional[int] = randn_tensor(init_latents.shape , generator=a , device=a , dtype=a )
# get latents
__lowerCamelCase : Union[str, Any] = self.scheduler.add_noise(a , a , a )
__lowerCamelCase : int = init_latents
return latents
def _snake_case ( self: Optional[int] , a: Any ):
__lowerCamelCase : List[Any] = self.coca_transform(a ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
__lowerCamelCase : Any = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
__lowerCamelCase : str = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def _snake_case ( self: Any , a: Tuple , a: Tuple ):
__lowerCamelCase : Dict = self.feature_extractor.preprocess(a )
__lowerCamelCase : Dict = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
__lowerCamelCase : List[str] = self.clip_model.get_image_features(a )
__lowerCamelCase : Optional[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=a )
__lowerCamelCase : Tuple = image_embeddings_clip.repeat_interleave(a , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def _snake_case ( self: str , a: str , a: int , a: List[Any] , a: str , a: List[Any] , a: Dict , a: int , ):
__lowerCamelCase : Optional[Any] = latents.detach().requires_grad_()
__lowerCamelCase : str = self.scheduler.scale_model_input(a , a )
# predict the noise residual
__lowerCamelCase : Optional[int] = self.unet(a , a , encoder_hidden_states=a ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
__lowerCamelCase : str = self.scheduler.alphas_cumprod[timestep]
__lowerCamelCase : Dict = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowerCamelCase : Optional[int] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
__lowerCamelCase : Optional[int] = torch.sqrt(a )
__lowerCamelCase : int = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , a ):
__lowerCamelCase : str = self.scheduler.sigmas[index]
__lowerCamelCase : List[Any] = latents - sigma * noise_pred
else:
raise ValueError(F'scheduler type {type(self.scheduler )} not supported' )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__lowerCamelCase : Optional[int] = 1 / 0.1_8_2_1_5 * sample
__lowerCamelCase : Optional[Any] = self.vae.decode(a ).sample
__lowerCamelCase : Tuple = (image / 2 + 0.5).clamp(0 , 1 )
__lowerCamelCase : Any = transforms.Resize(self.feature_extractor_size )(a )
__lowerCamelCase : Union[str, Any] = self.normalize(a ).to(latents.dtype )
__lowerCamelCase : Tuple = self.clip_model.get_image_features(a )
__lowerCamelCase : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=a )
__lowerCamelCase : List[str] = spherical_dist_loss(a , a ).mean() * clip_guidance_scale
__lowerCamelCase : Tuple = -torch.autograd.grad(a , a )[0]
if isinstance(self.scheduler , a ):
__lowerCamelCase : Optional[int] = latents.detach() + grads * (sigma**2)
__lowerCamelCase : List[Any] = noise_pred_original
else:
__lowerCamelCase : str = noise_pred_original - torch.sqrt(a ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self: Any , a: Union[torch.FloatTensor, PIL.Image.Image] , a: Union[torch.FloatTensor, PIL.Image.Image] , a: Optional[str] = None , a: Optional[str] = None , a: Optional[int] = 512 , a: Optional[int] = 512 , a: float = 0.6 , a: Optional[int] = 50 , a: Optional[float] = 7.5 , a: Optional[int] = 1 , a: float = 0.0 , a: Optional[float] = 100 , a: Optional[torch.Generator] = None , a: Optional[str] = "pil" , a: bool = True , a: float = 0.8 , a: float = 0.1 , a: float = 0.1 , ):
if isinstance(a , a ) and len(a ) != batch_size:
raise ValueError(F'You have passed {batch_size} batch_size, but only {len(a )} generators.' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if isinstance(a , torch.Generator ) and batch_size > 1:
__lowerCamelCase : List[Any] = [generator] + [None] * (batch_size - 1)
__lowerCamelCase : Dict = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
__lowerCamelCase : Any = [x[0] for x in coca_is_none if x[1]]
__lowerCamelCase : str = ', '.join(a )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(a ):
raise ValueError(
F'Content prompt is None and CoCa [{coca_is_none_str}] is None.'
F'Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
__lowerCamelCase : Any = self.get_image_description(a )
if style_prompt is None:
if len(a ):
raise ValueError(
F'Style prompt is None and CoCa [{coca_is_none_str}] is None.'
F' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
__lowerCamelCase : Tuple = self.get_image_description(a )
# get prompt text embeddings for content and style
__lowerCamelCase : int = self.tokenizer(
a , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=a , return_tensors='pt' , )
__lowerCamelCase : Dict = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__lowerCamelCase : Union[str, Any] = self.tokenizer(
a , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=a , return_tensors='pt' , )
__lowerCamelCase : Any = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__lowerCamelCase : List[Any] = slerp(a , a , a )
# duplicate text embeddings for each generation per prompt
__lowerCamelCase : Any = text_embeddings.repeat_interleave(a , dim=0 )
# set timesteps
__lowerCamelCase : List[Any] = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__lowerCamelCase : Union[str, Any] = {}
if accepts_offset:
__lowerCamelCase : Dict = 1
self.scheduler.set_timesteps(a , **a )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
__lowerCamelCase , __lowerCamelCase : Dict = self.get_timesteps(a , a , self.device )
__lowerCamelCase : Tuple = timesteps[:1].repeat(a )
# Preprocess image
__lowerCamelCase : Any = preprocess(a , a , a )
__lowerCamelCase : str = self.prepare_latents(
a , a , a , text_embeddings.dtype , self.device , a )
__lowerCamelCase : Dict = preprocess(a , a , a )
__lowerCamelCase : Optional[int] = self.prepare_latents(
a , a , a , text_embeddings.dtype , self.device , a )
__lowerCamelCase : int = slerp(a , a , a )
if clip_guidance_scale > 0:
__lowerCamelCase : List[str] = self.get_clip_image_embeddings(a , a )
__lowerCamelCase : Union[str, Any] = self.get_clip_image_embeddings(a , a )
__lowerCamelCase : Union[str, Any] = slerp(
a , a , a )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__lowerCamelCase : Tuple = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__lowerCamelCase : Optional[int] = content_text_input.input_ids.shape[-1]
__lowerCamelCase : int = self.tokenizer([''] , padding='max_length' , max_length=a , return_tensors='pt' )
__lowerCamelCase : List[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__lowerCamelCase : List[Any] = uncond_embeddings.repeat_interleave(a , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCamelCase : int = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__lowerCamelCase : str = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__lowerCamelCase : List[str] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__lowerCamelCase : Tuple = torch.randn(a , generator=a , device='cpu' , dtype=a ).to(
self.device )
else:
__lowerCamelCase : List[Any] = torch.randn(a , generator=a , device=self.device , dtype=a )
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
__lowerCamelCase : List[str] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__lowerCamelCase : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__lowerCamelCase : int = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__lowerCamelCase : Dict = {}
if accepts_eta:
__lowerCamelCase : List[str] = eta
# check if the scheduler accepts generator
__lowerCamelCase : Optional[int] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__lowerCamelCase : Optional[Any] = generator
with self.progress_bar(total=a ):
for i, t in enumerate(a ):
# expand the latents if we are doing classifier free guidance
__lowerCamelCase : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowerCamelCase : Union[str, Any] = self.scheduler.scale_model_input(a , a )
# predict the noise residual
__lowerCamelCase : Tuple = self.unet(a , a , encoder_hidden_states=a ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__lowerCamelCase , __lowerCamelCase : str = noise_pred.chunk(2 )
__lowerCamelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__lowerCamelCase : str = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__lowerCamelCase , __lowerCamelCase : int = self.cond_fn(
a , a , a , a , a , a , a , )
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase : Tuple = self.scheduler.step(a , a , a , **a ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__lowerCamelCase : List[Any] = 1 / 0.1_8_2_1_5 * latents
__lowerCamelCase : Union[str, Any] = self.vae.decode(a ).sample
__lowerCamelCase : Optional[int] = (image / 2 + 0.5).clamp(0 , 1 )
__lowerCamelCase : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowerCamelCase : Union[str, Any] = self.numpy_to_pil(a )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=a , nsfw_content_detected=a )
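
# Hedged usage sketch (comments only): this file is written as a diffusers
# community pipeline, so it would typically be loaded with a local
# `custom_pipeline` path. The file name below is an assumption; adjust it to
# wherever this module actually lives.
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4",
#         custom_pipeline="./clip_guided_images_mixing_stable_diffusion.py",
#     )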
| 194 | 1 |
"""simple docstring"""
import os
def solution() -> int:
    """Return the total of all the name scores in the file."""
    with open(os.path.dirname(__file__) + '/p022_names.txt' ) as file:
        names = str(file.readlines()[0] )
        names = names.replace('"' , '' ).split(',' )

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
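
# Worked example (from the Project Euler 22 statement): once the list is
# sorted, COLIN sits at position 938 and scores 3 + 15 + 12 + 9 + 14 = 53,
# contributing 938 * 53 = 49714 to the total.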
if __name__ == "__main__":
print(solution())
| 69 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj , *args ), None
    except Exception as e:
        return None, e
a__ = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
a__ = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
a__ = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
a__ = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
a__ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
a__ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4 )
    py = {}
    for _, (fun, *args) in enumerate(operations ):
        my_res, my_exc = _run_operation(my , fun , *args )
        py_res, py_exc = _run_operation(py , fun , *args )
        assert my_res == py_res
        assert str(my ) == str(py )
        assert set(my ) == set(py )
        assert len(my ) == len(py )
        assert set(my.items() ) == set(py.items() )
def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_" )

    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}

    assert dict_public_names > hash_public_names
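
# Illustrative sketch (not part of the tests): each scenario above is a list
# of (function, *args) tuples, so replaying one against a plain dict looks
# like this:
#
#     d = {}
#     for fun, *args in _add_items:
#         fun(d, *args)
#     assert d == {"key_a": "val_a", "key_b": "val_b"}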
| 317 | 0 |
import argparse
import os
import re
_UpperCAmelCase : Union[str, Any] = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_UpperCAmelCase : Tuple = re.compile(R"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_UpperCAmelCase : Dict = re.compile(R"\s*\(\s*\"(\S[^\"]+)\"")
def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname , 'r' , encoding='utf-8' ) as f:
        content = f.read()

    lines = content.split('\n' )
    new_lines = []
    line_idx = 0
    while line_idx < len(lines ):
        if _re_intro_mapping.search(lines[line_idx] ) is not None:
            indent = len(re.search(R'^(\s*)\S' , lines[line_idx] ).groups()[0] ) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(' ' * indent + '(' ):
                new_lines.append(lines[line_idx] )
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(' ' * indent + ')' ):
                        line_idx += 1
                    blocks.append('\n'.join(lines[start_idx : line_idx + 1] ) )
                else:
                    blocks.append(lines[line_idx] )
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks , key=lambda x : _re_identifier.search(x ).groups()[0] )
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx] )
            line_idx += 1

    if overwrite:
        with open(fname , 'w' , encoding='utf-8' ) as f:
            f.write('\n'.join(new_lines ) )
    elif "\n".join(new_lines ) != content:
        return True
def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE , f ) for f in os.listdir(PATH_TO_AUTO_MODULE ) if f.endswith('.py' )]
    diffs = [sort_auto_mapping(fname , overwrite=overwrite ) for fname in fnames]

    if not overwrite and any(diffs ):
        failures = [f for f, d in zip(fnames , diffs ) if d]
        raise ValueError(
            f'''The following files have auto mappings that need sorting: {', '.join(failures )}. Run `make style` to fix'''
            ' this.' )
if __name__ == "__main__":
_UpperCAmelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
_UpperCAmelCase : Union[str, Any] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
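
# Illustrative before/after (comments only) of what sort_auto_mapping does to
# an OrderedDict literal inside the auto module:
#     before: ("gpt2", ...), ("albert", ...), ("bert", ...)
#     after:  ("albert", ...), ("bert", ...), ("gpt2", ...)
# Entries are reordered alphabetically by the quoted identifier; nothing else
# in the file changes.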
| 110 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 110 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_ibert'] = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 3 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = '''vivit'''

    def __init__( self ,image_size=2_24 ,num_frames=32 ,tubelet_size=[2, 16, 16] ,num_channels=3 ,hidden_size=7_68 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=30_72 ,hidden_act="gelu_fast" ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,initializer_range=0.02 ,layer_norm_eps=1E-06 ,qkv_bias=True ,**kwargs ,):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs )
| 191 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Union[str, Any] = ["""CLIPFeatureExtractor"""]
__lowerCamelCase : List[Any] = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
__lowerCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
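# Usage sketch (hedged): the `_LazyModule` above defers the heavy backend imports
# until an attribute is first accessed, so importing the package stays cheap.
# Assuming `transformers` is installed with torch support, the torch-backed
# module is only imported on the `CLIPModel` access below:
#
#     from transformers import CLIPConfig, CLIPModel  # resolved lazily via _LazyModule
#     clip_model = CLIPModel(CLIPConfig())            # torch import happens here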
| 140 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
__lowerCamelCase : Dict = logging.get_logger("""transformers.models.speecht5""")
def load_weights ( checkpoint , hf_model , config ) -> Tuple:
hf_model.apply_weight_norm()
UpperCamelCase : int = checkpoint["input_conv.weight_g"]
UpperCamelCase : Dict = checkpoint["input_conv.weight_v"]
UpperCamelCase : List[Any] = checkpoint["input_conv.bias"]
for i in range(len(config.upsample_rates ) ):
UpperCamelCase : Any = checkpoint[F"""upsamples.{i}.1.weight_g"""]
UpperCamelCase : List[Any] = checkpoint[F"""upsamples.{i}.1.weight_v"""]
UpperCamelCase : Optional[Any] = checkpoint[F"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
UpperCamelCase : Union[str, Any] = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_g"""]
UpperCamelCase : int = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_v"""]
UpperCamelCase : str = checkpoint[F"""blocks.{i}.convs1.{j}.1.bias"""]
UpperCamelCase : Union[str, Any] = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_g"""]
UpperCamelCase : int = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_v"""]
UpperCamelCase : Optional[Any] = checkpoint[F"""blocks.{i}.convs2.{j}.1.bias"""]
UpperCamelCase : Tuple = checkpoint["output_conv.1.weight_g"]
UpperCamelCase : Tuple = checkpoint["output_conv.1.weight_v"]
UpperCamelCase : int = checkpoint["output_conv.1.bias"]
hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint ( checkpoint_path , stats_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ) -> Tuple:
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechTaHifiGanConfig()
    model = SpeechTaHifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint["model"]["generator"] , model , config )
    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("Pushing to the hub..." )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
__lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
__lowerCamelCase : Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
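# Illustrative invocation of the converter above (all paths are hypothetical
# placeholders; the script expects a fairseq-style generator checkpoint plus
# the stats.npy file holding the mean/scale vectors):
#
#   python convert_hifigan.py \
#       --checkpoint_path ./hifigan/generator.ckpt \
#       --stats_path ./hifigan/stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan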
| 140 | 1 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq ( number : int ) -> bool:
    sq : int = int(number**0.5 )
    return number == sq * sq
def add_three ( x_num : int , x_den : int , y_num : int , y_den : int , z_num : int , z_den : int ) -> tuple[int, int]:
    top : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom : int = x_den * y_den * z_den
    hcf : int = gcd(top , bottom )
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution ( order : int = 35 ) -> int:
    unique_s : set = set()
    hcf : int
    total : Fraction = Fraction(0 )
    fraction_sum : tuple[int, int]
    for x_num in range(1 , order + 1 ):
        for x_den in range(x_num + 1 , order + 1 ):
            for y_num in range(1 , order + 1 ):
                for y_den in range(y_num + 1 , order + 1 ):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
    for num, den in unique_s:
        total += Fraction(num , den )
    return total.denominator + total.numerator
if __name__ == "__main__":
print(F"{solution() = }")
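# Worked example for add_three above: with 1/2 + 1/3 + 1/6 the raw numerator is
# 1*3*6 + 1*2*6 + 1*2*3 = 36 and the raw denominator is 2*3*6 = 36, so the sum
# reduces to 1/1:
assert add_three(1 , 2 , 1 , 3 , 1 , 6 ) == (1, 1)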
| 23 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__: Tuple = logging.get_logger(__name__)
UpperCamelCase__: Optional[int] = {"vocab_file": "sentencepiece.bpe.model"}
UpperCamelCase__: Optional[int] = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
UpperCamelCase__: Dict = {
"moussaKam/mbarthez": 1024,
"moussaKam/barthez": 1024,
"moussaKam/barthez-orangesum-title": 1024,
}
UpperCamelCase__: Tuple = "▁"
class BarthezTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        self.fairseq_tokens_to_ids['''<mask>'''] = len(self.sp_model ) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
@property
    def vocab_size( self ) -> int:
return len(self.sp_model )
    def get_vocab( self ) -> Dict:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text : str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token : str ) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        return spm_id if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index : int ) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens : List[str] ) -> str:
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def __getstate__( self ) -> Union[str, Any]:
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
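# Usage sketch for the tokenizer above (hedged: requires the `sentencepiece`
# package and network access; the checkpoint name comes from the pretrained map
# earlier in this file):
#
#     tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#     ids = tokenizer("Un exemple en francais.")["input_ids"]
#     print(tokenizer.convert_ids_to_tokens(ids))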
| 23 | 1 |
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCamelCase : Optional[Any] = False, False, False
@dataclass
class Audio :
    sampling_rate : Optional[int] = None
    mono : bool = True
    decode : bool = True
    id : Optional[str] = None
    # Automatically constructed
    dtype : ClassVar[str] = "dict"
    pa_type : ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
    _type : str = field(default="Audio" , init=False , repr=False )
def __call__( self ):
'''simple docstring'''
return self.pa_type
    def encode_example( self , value ) -> dict:
        '''simple docstring'''
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError('To support encoding audio data, please install \'soundfile\'.' ) from err
        if isinstance(value , str ):
            return {"bytes": None, "path": value}
        elif isinstance(value , bytes ):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer , value['array'] , value['sampling_rate'] , format='wav' )
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith('pcm' ):
                # "PCM" only has raw audio bytes
                if value.get('sampling_rate' ) is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError('To use PCM files, please specify a \'sampling_rate\' in Audio object' )
                if value.get('bytes' ):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value['bytes'] , dtype=np.int16 ).astype(np.float32 ) / 32767
                else:
                    bytes_value = np.memmap(value['path'] , dtype='h' , mode='r' ).astype(np.float32 ) / 32767
                buffer = BytesIO(bytes() )
                sf.write(buffer , bytes_value , value['sampling_rate'] , format='wav' )
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get('path' )}
        elif value.get('bytes' ) is not None or value.get('path' ) is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get('bytes' ), "path": value.get('path' )}
        else:
            raise ValueError(
                F'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
    def decode_example( self , value , token_per_repo_id = None ) -> dict:
        '''simple docstring'''
        if not self.decode:
            raise RuntimeError('Decoding is disabled for this feature. Please use Audio(decode=True) instead.' )
        path , file = (value['path'], BytesIO(value['bytes'] )) if value['bytes'] is not None else (value['path'], None)
        if path is None and file is None:
            raise ValueError(F'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' )
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError('To support decoding audio files, please install \'librosa\' and \'soundfile\'.' ) from err
        audio_format = xsplitext(path )[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                'Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                'Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' )
        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split('::' )[-1]
            try:
                repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL )['repo_id']
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path , 'rb' , use_auth_token=use_auth_token ) as f:
                array , sampling_rate = sf.read(f )
        else:
            array , sampling_rate = sf.read(file )
        array = array.T
        if self.mono:
            array = librosa.to_mono(array )
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array , orig_sr=sampling_rate , target_sr=self.sampling_rate )
            sampling_rate = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten( self ) -> "FeatureType":
'''simple docstring'''
from .features import Value
if self.decode:
raise ValueError('Cannot flatten a decoded Audio feature.' )
return {
"bytes": Value('binary' ),
"path": Value('string' ),
}
    def cast_storage( self , storage ) -> pa.StructArray:
        '''simple docstring'''
        if pa.types.is_string(storage.type ):
            bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            storage = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('array' ):
            storage = pa.array([Audio().encode_example(x ) if x is not None else None for x in storage.to_pylist()] )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index('bytes' ) >= 0:
                bytes_array = storage.field('bytes' )
            else:
                bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            if storage.type.get_field_index('path' ) >= 0:
                path_array = storage.field('path' )
            else:
                path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
        return array_cast(storage , self.pa_type )
    def embed_storage( self , storage ) -> pa.StructArray:
        '''simple docstring'''
        @no_op_if_value_is_null
        def path_to_bytes(path ):
            with xopen(path , 'rb' ) as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        path_array = pa.array(
            [os.path.basename(path ) if path is not None else None for path in storage.field('path' ).to_pylist()] , type=pa.string() , )
        storage = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
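# Usage sketch for the Audio feature above (hedged: requires `soundfile`, and
# the sample values are illustrative). Encoding accepts a path, raw bytes, or an
# array dict; decoding returns the array plus its sampling rate:
#
#     feature = Audio(sampling_rate=16000)
#     encoded = feature.encode_example({"array": [0.0, 0.1, -0.1], "sampling_rate": 16000})
#     decoded = feature.decode_example(encoded)  # {"path": None, "array": ..., "sampling_rate": 16000}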
| 367 |
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class Vertex :
    def __init__( self , id_ ):
        '''simple docstring'''
        self.id = str(id_ )
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}
    def __lt__( self , other ):
        '''simple docstring'''
        return self.key < other.key
    def __repr__( self ):
        '''simple docstring'''
        return self.id
    def add_neighbor( self , vertex ):
        '''simple docstring'''
        self.neighbors.append(vertex )
    def add_edge( self , vertex , weight ):
        '''simple docstring'''
        self.edges[vertex.id] = weight
def connect ( graph :list , a :int , b :int , edge :int ) -> None:
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1] )
    graph[b - 1].add_neighbor(graph[a - 1] )
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1] , edge )
    graph[b - 1].add_edge(graph[a - 1] , edge )
def prims ( graph :list , root :Vertex ) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q )
        q.remove(u )
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1 , len(graph ) ):
        a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
    return a
def prim_heap ( graph :list , root :Vertex ) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph )
    hq.heapify(h )
    while h:
        u = hq.heappop(h )
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h )
    for i in range(1 , len(graph ) ):
        yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def test_vector ( ) -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
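# Illustrative usage of the Vertex/connect/prims API above: build a small
# weighted graph and extract a minimum spanning tree as (child, parent) pairs.
def _demo_prims() -> None:
    graph = [Vertex(i ) for i in range(1 , 5 )]
    connect(graph , 1 , 2 , 1 )
    connect(graph , 1 , 3 , 4 )
    connect(graph , 2 , 3 , 2 )
    connect(graph , 3 , 4 , 3 )
    print(prims(graph , graph[0] ) )  # expected: [(2, 1), (3, 2), (4, 3)]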
| 263 | 0 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list ( shape , scale=1.0 , rng=None , name=None ) ->Any:
    '''simple docstring'''
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=10 , hop_length=160 , chunk_length=8 , padding_value=0.0 , sampling_rate=4000 , return_attention_mask=False , do_normalize=True , ) -> str:
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict( self ) -> List[Any]:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ) -> Tuple:
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp( self ) -> Tuple:
        self.feat_extract_tester = WhisperFeatureExtractionTester(self )
    def test_feat_extract_from_and_save_pretrained( self ) -> Any:
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
    def test_feat_extract_to_json_file( self ) -> Union[str, Any]:
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , "feat_extract.json" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )
    def test_call( self ) -> Dict:
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs , padding="max_length" , return_tensors="np" ).input_features
        self.assertTrue(input_features.ndim == 3 )
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1E-3 ) )
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors="np" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors="np" ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors="np" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors="np" ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
        # Test truncation required
        speech_inputs = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input ) for speech_input in speech_inputs_truncated]
        encoded_sequences_1 = feature_extractor(np_speech_inputs , return_tensors="np" ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated , return_tensors="np" ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
    def test_double_precision_pad( self ) -> int:
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 , 32 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
    def _load_datasamples( self , num_samples ) -> Union[str, Any]:
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("id" ).select(range(num_samples ) )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration( self ) -> List[str]:
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ] )
        # fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech , return_tensors="pt" ).input_features
        self.assertEqual(input_features.shape , (1, 80, 3000) )
        self.assertTrue(torch.allclose(input_features[0, 0, :30] , EXPECTED_INPUT_FEATURES , atol=1E-4 ) )
    def test_zero_mean_unit_variance_normalization_trunc_np_longest( self ) -> str:
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        audio = self._load_datasamples(1 )[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=None )[0]
        self.assertTrue(np.all(np.mean(audio ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(audio ) - 1 ) < 1E-3 ) )
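# Quick sanity check for the floats_list helper defined above (no model download
# needed): it produces a nested list of the requested shape with values scaled
# into [0, scale).
_batch = floats_list((2, 4) , scale=0.5 )
assert len(_batch ) == 2 and len(_batch[0] ) == 4
assert all(0.0 <= v < 0.5 for v in _batch[0] )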
| 105 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__UpperCamelCase : str = logging.getLogger(__name__)
def load_and_quantize_model ( model : torch.nn.Module , bnb_quantization_config : BnbQuantizationConfig , weights_location : Union[str, os.PathLike] = None , device_map : Optional[Dict[str, Union[int, str, torch.device]]] = None , no_split_module_classes : Optional[List[str]] = None , max_memory : Optional[Dict[Union[int, str], Union[int, str]]] = None , offload_folder : Optional[Union[str, os.PathLike]] = None , offload_state_dict : bool = False , ):
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = bnb_quantization_config.load_in_abit
UpperCamelCase__ : List[Any] = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
'''make sure you have the latest version of `bitsandbytes` installed.''' )
UpperCamelCase__ : int = []
# custom device map
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1:
UpperCamelCase__ : int = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
UpperCamelCase__ : List[Any] = get_keys_to_not_convert(SCREAMING_SNAKE_CASE )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
UpperCamelCase__ : Union[str, Any] = []
UpperCamelCase__ : List[Any] = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(SCREAMING_SNAKE_CASE )
# compatibility with peft
UpperCamelCase__ : Optional[Any] = load_in_abit
UpperCamelCase__ : List[str] = load_in_abit
UpperCamelCase__ : str = get_parameter_device(SCREAMING_SNAKE_CASE )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
UpperCamelCase__ : Union[str, Any] = replace_with_bnb_layers(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , modules_to_not_convert=SCREAMING_SNAKE_CASE )
# convert param to the right dtype
UpperCamelCase__ : str = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
UpperCamelCase__ : Union[str, Any] = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
UpperCamelCase__ : Dict = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(SCREAMING_SNAKE_CASE ):
param.to(SCREAMING_SNAKE_CASE )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
F"The model device type is {model_device.type}. However, cuda is needed for quantization."
'''We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
F"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} " )
else:
with init_empty_weights():
UpperCamelCase__ : str = replace_with_bnb_layers(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , modules_to_not_convert=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = get_quantized_model_device_map(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , max_memory=SCREAMING_SNAKE_CASE , no_split_module_classes=SCREAMING_SNAKE_CASE , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
UpperCamelCase__ : Dict = True
UpperCamelCase__ : str = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
load_checkpoint_in_model(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=SCREAMING_SNAKE_CASE , offload_state_dict=SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(SCREAMING_SNAKE_CASE , device_map=SCREAMING_SNAKE_CASE , offload_dir=SCREAMING_SNAKE_CASE )
def get_quantized_model_device_map ( model , bnb_quantization_config , device_map=None , max_memory=None , no_split_module_classes=None ):
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
UpperCamelCase__ : int = {'''''': torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
UpperCamelCase__ : str = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
UpperCamelCase__ : Optional[Any] = {}
UpperCamelCase__ : Union[str, Any] = special_dtypes
UpperCamelCase__ : Optional[int] = no_split_module_classes
UpperCamelCase__ : int = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
UpperCamelCase__ : Dict = get_balanced_memory(
SCREAMING_SNAKE_CASE , low_zero=(device_map == '''balanced_low_0''') , max_memory=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
UpperCamelCase__ : Tuple = max_memory
UpperCamelCase__ : Dict = infer_auto_device_map(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
# check if don't have any quantized module on the cpu
UpperCamelCase__ : List[Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
UpperCamelCase__ : Dict = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
logger.info(
'''Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
def replace_with_bnb_layers ( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ):
"""simple docstring"""
if modules_to_not_convert is None:
UpperCamelCase__ : Optional[Any] = []
UpperCamelCase__ , UpperCamelCase__ : Dict = _replace_with_bnb_layers(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def _replace_with_bnb_layers ( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None , ):
"""simple docstring"""
UpperCamelCase__ : str = False
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase__ : Tuple = []
current_key_name.append(SCREAMING_SNAKE_CASE )
if isinstance(SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
UpperCamelCase__ : int = '''.'''.join(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
UpperCamelCase__ : List[str] = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
UpperCamelCase__ : int = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
UpperCamelCase__ : Optional[int] = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
UpperCamelCase__ : List[Any] = module.weight.data
if module.bias is not None:
UpperCamelCase__ : List[str] = module.bias.data
bnb_module.requires_grad_(SCREAMING_SNAKE_CASE )
setattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = True
if len(list(module.children() ) ) > 0:
UpperCamelCase__ , UpperCamelCase__ : Tuple = _replace_with_bnb_layers(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def get_keys_to_not_convert ( model ):
"""simple docstring"""
with init_empty_weights():
UpperCamelCase__ : Dict = deepcopy(SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
UpperCamelCase__ : str = find_tied_parameters(SCREAMING_SNAKE_CASE )
# For compatibility with Accelerate < 0.18
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : List[str] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
UpperCamelCase__ : int = sum(SCREAMING_SNAKE_CASE , [] )
UpperCamelCase__ : Union[str, Any] = len(SCREAMING_SNAKE_CASE ) > 0
# Check if it is a base model
UpperCamelCase__ : str = False
if hasattr(SCREAMING_SNAKE_CASE , '''base_model_prefix''' ):
UpperCamelCase__ : int = not hasattr(SCREAMING_SNAKE_CASE , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase__ : Tuple = list(model.named_children() )
UpperCamelCase__ : str = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase__ : Dict = set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = list(set(SCREAMING_SNAKE_CASE ) ) + list(SCREAMING_SNAKE_CASE )
# remove ".weight" from the keys
UpperCamelCase__ : int = ['''.weight''', '''.bias''']
UpperCamelCase__ : Optional[int] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase__ : int = name.replace(SCREAMING_SNAKE_CASE , '''''' )
filtered_module_names.append(SCREAMING_SNAKE_CASE )
return filtered_module_names
def has_4bit_bnb_layers ( model ):
    """simple docstring"""
    for m in model.modules():
        if isinstance(m , bnb.nn.Linearabit ):
            return True
    return False
def get_parameter_device ( parameter : nn.Module ):
    """simple docstring"""
    return next(parameter.parameters() ).device
def quantize_and_offload_8bit ( model , param , param_name , new_dtype , offload_folder , offload_index , fpaa_statistics ):
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 0 , dtype=SCREAMING_SNAKE_CASE , value=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = param_name
UpperCamelCase__ : str = model
if "." in tensor_name:
UpperCamelCase__ : List[Any] = tensor_name.split('''.''' )
for split in splits[:-1]:
UpperCamelCase__ : Dict = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if new_module is None:
raise ValueError(F"{module} has no attribute {split}." )
UpperCamelCase__ : Optional[int] = new_module
UpperCamelCase__ : List[str] = splits[-1]
# offload weights
UpperCamelCase__ : Any = False
offload_weight(module._parameters[tensor_name] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE )
if hasattr(module._parameters[tensor_name] , '''SCB''' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE , )
else:
offload_weight(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE )
offload_weight(SCREAMING_SNAKE_CASE , param_name.replace('''weight''' , '''SCB''' ) , SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE )
set_module_tensor_to_device(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''meta''' , dtype=SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
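# A minimal sketch of the public entry point above (hedged: assumes a CUDA GPU,
# an installed `bitsandbytes`, and that `my_model` / `"my_weights_dir"` stand in
# for a real empty-weights model and checkpoint folder):
#
#     from accelerate.utils import BnbQuantizationConfig
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#     quantized = load_and_quantize_model(my_model, bnb_config, weights_location="my_weights_dir")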
| 146 | 0 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline( DiffusionPipeline ):
'''simple docstring'''
    def __init__( self , value_function , unet , scheduler , env , ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except: # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except: # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]
    def normalize( self , x_in , key ):
        return (x_in - self.means[key]) / self.stds[key]
    def de_normalize( self , x_in , key ):
        return x_in * self.stds[key] + self.means[key]
    def to_torch( self , x_in ):
        if type(x_in ) is dict:
            return {k: self.to_torch(v ) for k, v in x_in.items()}
        elif torch.is_tensor(x_in ):
            return x_in.to(self.unet.device )
        return torch.tensor(x_in , device=self.unet.device )
    def reset_x0( self , x_in , cond , act_dim ):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in
    def run_diffusion( self , x , conditions , n_guide_steps , scale ):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,) , i , device=self.unet.device , dtype=torch.long )
            for _ in range(n_guide_steps ):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0 , 2 , 1 ) , timesteps ).sample
                    grad = torch.autograd.grad([y.sum()] , [x] )[0]
                    posterior_variance = self.scheduler._get_variance(i )
                    model_std = torch.exp(0.5 * posterior_variance )
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x , conditions , self.action_dim )
            prev_x = self.unet(x.permute(0 , 2 , 1 ) , timesteps ).sample.permute(0 , 2 , 1 )
            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x , i , x , predict_epsilon=False )['''prev_sample''']
            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x , conditions , self.action_dim )
            x = self.to_torch(x )
        return x, y
    def __call__( self , obs , batch_size=64 , planning_horizon=32 , n_guide_steps=2 , scale=0.1 ):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs , '''observations''' )
        obs = obs[None].repeat(batch_size , axis=0 )
        conditions = {0: self.to_torch(obs )}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape , device=self.unet.device )
        x = self.reset_x0(x1 , conditions , self.action_dim )
        x = self.to_torch(x )
        # run the diffusion process
        x , y = self.run_diffusion(x , conditions , n_guide_steps , scale )
        # sort output trajectories by value
        sorted_idx = y.argsort(0 , descending=True ).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions , key='''actions''' )
        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0 , batch_size )
        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
| 267 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline( DiffusionPipeline ):
'''simple docstring'''
    def __init__( self , value_function , unet , scheduler , env , ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except: # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except: # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]
    def normalize( self , x_in , key ):
        return (x_in - self.means[key]) / self.stds[key]
    def de_normalize( self , x_in , key ):
        return x_in * self.stds[key] + self.means[key]
    def to_torch( self , x_in ):
        if type(x_in ) is dict:
            return {k: self.to_torch(v ) for k, v in x_in.items()}
        elif torch.is_tensor(x_in ):
            return x_in.to(self.unet.device )
        return torch.tensor(x_in , device=self.unet.device )
    def reset_x0( self , x_in , cond , act_dim ):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in
    def run_diffusion( self , x , conditions , n_guide_steps , scale ):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,) , i , device=self.unet.device , dtype=torch.long )
            for _ in range(n_guide_steps ):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0 , 2 , 1 ) , timesteps ).sample
                    grad = torch.autograd.grad([y.sum()] , [x] )[0]
                    posterior_variance = self.scheduler._get_variance(i )
                    model_std = torch.exp(0.5 * posterior_variance )
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x , conditions , self.action_dim )
            prev_x = self.unet(x.permute(0 , 2 , 1 ) , timesteps ).sample.permute(0 , 2 , 1 )
            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x , i , x , predict_epsilon=False )['''prev_sample''']
            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x , conditions , self.action_dim )
            x = self.to_torch(x )
        return x, y
    def __call__( self , obs , batch_size=64 , planning_horizon=32 , n_guide_steps=2 , scale=0.1 ):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs , '''observations''' )
        obs = obs[None].repeat(batch_size , axis=0 )
        conditions = {0: self.to_torch(obs )}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape , device=self.unet.device )
        x = self.reset_xa(x1 , conditions , self.action_dim )
        x = self.to_torch(x )
        # run the diffusion process
        x , y = self.run_diffusion(x , conditions , n_guide_steps , scale )
        # sort output trajectories by value
        sorted_idx = y.argsort(0 , descending=True ).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions , key='''actions''' )
        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0 , batch_size )
        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
| 267 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Dict =logging.get_logger(__name__)
_UpperCAmelCase : Any ={
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class ASTConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """audio-spectrogram-transformer"""
    def __init__( self , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , patch_size=1_6 , qkv_bias=True , frequency_stride=1_0 , time_stride=1_0 , max_length=1_0_2_4 , num_mel_bins=1_2_8 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 262 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("""DataClass""", Any)
DataClassType = NewType("""DataClassType""", Any)
def string_to_bool( v )-> bool:
    if isinstance(v , bool ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def make_choice_type_function( choices )-> Callable[[str], Any]:
    str_to_choice = {str(choice ): choice for choice in choices}
    return lambda arg : str_to_choice.get(arg , arg )
def HfArg( *,
    aliases = None , help = None , default = dataclasses.MISSING , default_factory = dataclasses.MISSING , metadata = None , **kwargs , )-> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata['''aliases'''] = aliases
    if help is not None:
        metadata['''help'''] = help
    return dataclasses.field(metadata=metadata , default=default , default_factory=default_factory , **kwargs )
class HfArgumentParser( ArgumentParser ):
    '''simple docstring'''
    dataclass_types : Iterable[DataClassType]
    def __init__( self , dataclass_types , **kwargs ):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs['''formatter_class'''] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs )
        if dataclasses.is_dataclass(dataclass_types ):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype )
@staticmethod
    def _parse_dataclass_field( parser , field ):
        field_name = f"""--{field.name}"""
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type , str ):
            raise RuntimeError(
                '''Unresolved type detected, which should have been done with the help of '''
                '''`typing.get_type_hints` method by default''' )
        aliases = kwargs.pop('''aliases''' , [] )
        if isinstance(aliases , str ):
            aliases = [aliases]
        origin_type = getattr(field.type , '''__origin__''' , field.type )
        if origin_type is Union or (hasattr(types , '''UnionType''' ) and isinstance(origin_type , types.UnionType )):
            if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(None ) not in field.type.__args__
            ):
                raise ValueError(
                    '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
                    ''' the argument parser only supports one type per argument.'''
                    f""" Problem encountered in field '{field.name}'.""" )
            if type(None ) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type , '''__origin__''' , field.type )
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None , field.type.__args__[1] ) else field.type.__args__[1]
                )
                origin_type = getattr(field.type , '''__origin__''' , field.type )
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type , type ) and issubclass(field.type , Enum )):
            if origin_type is Literal:
                kwargs['''choices'''] = field.type.__args__
            else:
                kwargs['''choices'''] = [x.value for x in field.type]
            kwargs['''type'''] = make_choice_type_function(kwargs['''choices'''] )
            if field.default is not dataclasses.MISSING:
                kwargs['''default'''] = field.default
            else:
                kwargs['''required'''] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs )
            # Hack because type=bool in argparse does not behave as we want.
            kwargs['''type'''] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs['''default'''] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs['''nargs'''] = '''?'''
                # This is the value that will get picked if we do --field_name (without value)
                kwargs['''const'''] = True
        elif isclass(origin_type ) and issubclass(origin_type , list ):
            kwargs['''type'''] = field.type.__args__[0]
            kwargs['''nargs'''] = '''+'''
            if field.default_factory is not dataclasses.MISSING:
                kwargs['''default'''] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs['''required'''] = True
        else:
            kwargs['''type'''] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs['''default'''] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs['''default'''] = field.default_factory()
            else:
                kwargs['''required'''] = True
        parser.add_argument(field_name , *aliases , **kwargs )
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs['''default'''] = False
            parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **bool_kwargs )
    def _add_dataclass_arguments( self , dtype ):
        if hasattr(dtype , '''_argument_group_name''' ):
            parser = self.add_argument_group(dtype._argument_group_name )
        else:
            parser = self
        try:
            type_hints : Dict[str, type] = get_type_hints(dtype )
except NameError:
raise RuntimeError(
f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
'''removing line of `from __future__ import annotations` which opts in Postponed '''
'''Evaluation of Annotations (PEP 563)''' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 1_0) and "unsupported operand type(s) for |" in str(ex ):
                python_version = '''.'''.join(map(str , sys.version_info[:3] ) )
raise RuntimeError(
f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
'''line of `from __future__ import annotations` which opts in union types as '''
'''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
                    '''support Python versions lower than 3.10, you need to use '''
'''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
'''`X | None`.''' ) from ex
raise
        for field in dataclasses.fields(dtype ):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser , field )
    def parse_args_into_dataclasses( self , args=None , return_remaining_strings=False , look_for_args_file=True , args_filename=None , args_file_flag=None , ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename ) )
            elif look_for_args_file and len(sys.argv ):
                args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag , type=str , action='''append''' )
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg , args = args_file_parser.parse_known_args(args=args )
                cmd_args_file_paths = vars(cfg ).get(args_file_flag.lstrip('''-''' ) , None )
                if cmd_args_file_paths:
                    args_files.extend([Path(p ) for p in cmd_args_file_paths] )
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace , remaining_args = self.parse_known_args(args=args )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in vars(namespace ).items() if k in keys}
            for k in keys:
                delattr(namespace , k )
            obj = dtype(**inputs )
            outputs.append(obj )
        if len(namespace.__dict__ ) > 0:
            # additional namespace.
            outputs.append(namespace )
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
            return (*outputs,)
    def parse_dict( self , args , allow_extra_keys = False ) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys() )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            obj = dtype(**inputs )
            outputs.append(obj )
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(unused_keys )}""" )
        return tuple(outputs )
    def parse_json_file( self , json_file , allow_extra_keys = False ) -> Tuple[DataClass, ...]:
        with open(Path(json_file ) , encoding='''utf-8''' ) as open_json_file:
            data = json.loads(open_json_file.read() )
        outputs = self.parse_dict(data , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
    def parse_yaml_file( self , yaml_file , allow_extra_keys = False ) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file ).read_text() ) , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
| 262 | 1 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
VectorOut = typing.Union[np.float64, int, float] # noqa: UP007
def euclidean_distance(va : Vector , vb : Vector ) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(va ) - np.asarray(vb )) ** 2 ) )
def euclidean_distance_no_np(va : Vector , vb : Vector ) -> VectorOut:
    return sum((x - y) ** 2 for x, y in zip(va , vb ) ) ** (1 / 2)
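# Quick sanity check (added; 3-4-5 triangle):
#   euclidean_distance([0, 0], [3, 4])       -> 5.0
#   euclidean_distance_no_np([0, 0], [3, 4]) -> 5.0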
if __name__ == "__main__":
    def benchmark() -> None:
from timeit import timeit
print("Without Numpy" )
print(
timeit(
"euclidean_distance_no_np([1, 2, 3], [4, 5, 6])" , number=1_00_00 , globals=globals() , ) )
print("With Numpy" )
print(
timeit(
"euclidean_distance([1, 2, 3], [4, 5, 6])" , number=1_00_00 , globals=globals() , ) )
benchmark()
| 111 |
'''simple docstring'''
def gnome_sort(lst : list ) -> list:
    if len(lst ) <= 1:
        return lst
    i = 1
    while i < len(lst ):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(gnome_sort(unsorted))
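    # Example (added): gnome_sort([3, 1, 2]) swaps backwards until local order holds and
    # returns [1, 2, 3]; worst-case cost is O(n^2), comparable to insertion sort.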
| 111 | 1 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
lowercase_ = get_tests_dir("fixtures/dummy-config.json")
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = 0
def __UpperCAmelCase ( self ):
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('''transformers.models.auto''' ) )
def __UpperCAmelCase ( self ):
__a = AutoConfig.from_pretrained('''bert-base-uncased''' )
self.assertIsInstance(_a , _a )
def __UpperCAmelCase ( self ):
__a = AutoConfig.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __UpperCAmelCase ( self ):
__a = AutoConfig.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __UpperCAmelCase ( self ):
__a = AutoConfig.for_model('''roberta''' )
self.assertIsInstance(_a , _a )
def __UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
__a = os.path.join(_a , '''fake-roberta''' )
os.makedirs(_a , exist_ok=_a )
with open(os.path.join(_a , '''config.json''' ) , '''w''' ) as f:
f.write(json.dumps({} ) )
__a = AutoConfig.from_pretrained(_a )
self.assertEqual(type(_a ) , _a )
def __UpperCAmelCase ( self ):
try:
AutoConfig.register('''custom''' , _a )
# Wrong model type will raise an error
with self.assertRaises(_a ):
AutoConfig.register('''model''' , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoConfig.register('''bert''' , _a )
# Now that the config is registered, it can be used as any other config with the auto-API
__a = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_a )
__a = AutoConfig.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def __UpperCAmelCase ( self ):
with self.assertRaisesRegex(
_a , '''bert-base is not a local folder and is not a valid model identifier''' ):
__a = AutoConfig.from_pretrained('''bert-base''' )
def __UpperCAmelCase ( self ):
with self.assertRaisesRegex(
_a , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
__a = AutoConfig.from_pretrained(_a , revision='''aaaaaa''' )
def __UpperCAmelCase ( self ):
with self.assertRaisesRegex(
_a , '''hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.''' , ):
__a = AutoConfig.from_pretrained('''hf-internal-testing/no-config-test-repo''' )
def __UpperCAmelCase ( self ):
with self.assertRaises(_a ):
__a = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
__a = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_a )
__a = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_a )
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_a )
__a = AutoConfig.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_config.__class__.__name__ , '''NewModelConfig''' )
def __UpperCAmelCase ( self ):
class __lowerCAmelCase ( lowercase__ ):
'''simple docstring'''
__UpperCAmelCase : str = "new-model"
try:
AutoConfig.register('''new-model''' , _a )
# If remote code is not set, the default is to use local
__a = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' )
# If remote code is disabled, we load the local one.
__a = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_a )
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' )
# If remote is enabled, we load from the Hub
__a = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_a )
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 45 |
'''simple docstring'''
def is_power_of_two(number : int ) -> bool:
    '''simple docstring'''
    if number < 0:
        raise ValueError("number must not be negative" )
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
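    # Worked example (added): 16 & 15 == 0b10000 & 0b01111 == 0 -> True, while
    # 12 & 11 == 0b1100 & 0b1011 == 0b1000 != 0 -> False, so 12 is rejected.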
| 311 | 0 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class FillMaskPipeline( Pipeline ):
    def get_masked_index( self , input_ids : GenericTensor ) -> np.ndarray:
        """simple docstring"""
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False )
        else:
            raise ValueError("""Unsupported framework""" )
        return masked_index
    def _ensure_exactly_one_mask_token( self , input_ids : GenericTensor ):
        """simple docstring"""
        masked_index = self.get_masked_index(input_ids )
        numel = np.prod(masked_index.shape )
        if numel < 1:
            raise PipelineException(
                """fill-mask""" , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
    def ensure_exactly_one_mask_token( self , model_inputs : GenericTensor ):
        """simple docstring"""
        if isinstance(model_inputs , list ):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0] )
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids )
    def preprocess( self , inputs , return_tensors=None , **preprocess_parameters ) -> Dict[str, GenericTensor]:
        """simple docstring"""
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors )
        self.ensure_exactly_one_mask_token(model_inputs )
        return model_inputs
    def _forward( self , model_inputs ):
        """simple docstring"""
        model_outputs = self.model(**model_inputs )
        model_outputs["""input_ids"""] = model_inputs["""input_ids"""]
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 , target_ids=None ):
        """simple docstring"""
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["""input_ids"""][0]
        outputs = model_outputs["""logits"""]
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits , axis=-1 )
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs , 0 ) , target_ids.reshape(-1 , 1 ) )
                probs = tf.expand_dims(probs , 0 )
            topk = tf.math.top_k(probs , k=top_k )
            values , predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False ).squeeze(-1 )
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1 )
            if target_ids is not None:
                probs = probs[..., target_ids]
            values , predictions = probs.topk(top_k )
        result = []
        single_mask = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
            row = []
            for v, p in zip(_values , _predictions ):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens , skip_special_tokens=False )
                proposition = {"""score""": v, """token""": p, """token_str""": self.tokenizer.decode([p] ), """sequence""": sequence}
                row.append(proposition )
            result.append(row )
if single_mask:
return result[0]
return result
    def get_target_ids( self , targets , top_k=None ):
        """simple docstring"""
        if isinstance(targets , str ):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target , None )
            if id_ is None:
                input_ids = self.tokenizer(
                    target , add_special_tokens=False , return_attention_mask=False , return_token_type_ids=False , max_length=1 , truncation=True , )["""input_ids"""]
                if len(input_ids ) == 0:
                    logger.warning(
                        f"""The specified target token `{target}` does not exist in the model vocabulary. """
                        """We cannot replace it with anything meaningful, ignoring it""" )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"""The specified target token `{target}` does not exist in the model vocabulary. """
                    f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
            target_ids.append(id_ )
        target_ids = list(set(target_ids ) )
        if len(target_ids ) == 0:
            raise ValueError("""At least one target must be provided when passed.""" )
        target_ids = np.array(target_ids )
        return target_ids
    def _sanitize_parameters( self , top_k=None , targets=None ):
        """simple docstring"""
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets , top_k )
            postprocess_params["""target_ids"""] = target_ids
        if top_k is not None:
            postprocess_params["""top_k"""] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                """fill-mask""" , self.model.base_model_prefix , """The tokenizer does not define a `mask_token`.""" )
        return {}, {}, postprocess_params
def __call__( self : Tuple , lowerCamelCase_ : str , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : List[Any] ):
"""simple docstring"""
UpperCamelCase = super().__call__(lowerCamelCase_ , **lowerCamelCase_ )
if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and len(lowerCamelCase_ ) == 1:
return outputs[0]
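    # Usage sketch (added; standard `transformers.pipeline` factory, the model id is only
    # an example):
    #   fill_mask = pipeline("fill-mask", model="bert-base-uncased")
    #   fill_mask("Paris is the [MASK] of France.", top_k=3)
    # Each prediction is a dict with "score", "token", "token_str" and "sequence",
    # matching the rows assembled in postprocess above.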
return outputs
| 165 | def lowercase( UpperCamelCase_ ) -> int:
'''simple docstring'''
UpperCamelCase = len(UpperCamelCase_ )
UpperCamelCase = len(matrix[0] )
UpperCamelCase = min(UpperCamelCase_ , UpperCamelCase_ )
for row in range(UpperCamelCase_ ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , UpperCamelCase_ ):
UpperCamelCase = matrix[col][row] / matrix[row][row]
for i in range(UpperCamelCase_ , UpperCamelCase_ ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
UpperCamelCase = True
for i in range(row + 1 , UpperCamelCase_ ):
if matrix[i][row] != 0:
UpperCamelCase , UpperCamelCase = matrix[i], matrix[row]
UpperCamelCase = False
break
if reduce:
rank -= 1
for i in range(UpperCamelCase_ ):
UpperCamelCase = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
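# Worked example (added): rank_of_matrix([[1, 2], [2, 4]]) -> 1, since row 2 is eliminated
# to zeros by the multiplier 2 and the resulting zero pivot decrements the rank.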
if __name__ == "__main__":
import doctest
doctest.testmod()
| 165 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
__snake_case = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class TransfoXLConfig( PretrainedConfig ):
    model_type = '''transfo-xl'''
    keys_to_ignore_at_inference = ['''mems''']
    attribute_map = {
        '''n_token''': '''vocab_size''',
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__( self , vocab_size=26_7735 , cutoffs=[2_0000, 4_0000, 20_0000] , d_model=1024 , d_embed=1024 , n_head=16 , d_head=64 , d_inner=4096 , div_val=4 , pre_lnorm=False , n_layer=18 , mem_len=1600 , clamp_len=1000 , same_length=True , proj_share_all_but_first=True , attn_type=0 , sample_softmax=-1 , adaptive=True , dropout=0.1 , dropatt=0.0 , untie_r=True , init="normal" , init_range=0.01 , proj_init_std=0.01 , init_std=0.02 , layer_norm_epsilon=1e-5 , eos_token_id=0 , **kwargs , ) -> Dict:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs )
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs )
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs )
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id , **kwargs )
@property
    def max_position_embeddings( self ) -> Optional[int]:
'''simple docstring'''
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
    def max_position_embeddings( self , value ) -> Optional[Any]:
'''simple docstring'''
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 348 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class a__( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ViTImageProcessor if is_vision_available() else None
@property
def a_ ( self):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = (3, 32, 128)
lowerCAmelCase = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
lowerCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase))))
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(__lowerCAmelCase) + """\n""")
lowerCAmelCase = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 128},
}
lowerCAmelCase = os.path.join(self.tmpdirname , __lowerCAmelCase)
with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
json.dump(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)
lowerCAmelCase = Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1))
return image_input
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
processor.save_pretrained(self.tmpdirname)
lowerCAmelCase = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
processor.save_pretrained(self.tmpdirname)
lowerCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
lowerCAmelCase = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0)
lowerCAmelCase = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCAmelCase , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = image_processor(__lowerCAmelCase , return_tensors="""np""")
lowerCAmelCase = processor(images=__lowerCAmelCase , return_tensors="""np""")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = """test"""
lowerCAmelCase = processor(text=__lowerCAmelCase)
lowerCAmelCase = tokenizer(__lowerCAmelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = """test"""
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase)
self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""])
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase):
processor()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase = processor.char_decode(__lowerCAmelCase)
lowerCAmelCase = tokenizer.batch_decode(__lowerCAmelCase)
lowerCAmelCase = [seq.replace(""" """ , """""") for seq in decoded_tok]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = None
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = torch.randn(1 , 27 , 38)
lowerCAmelCase = torch.randn(1 , 27 , 50257)
lowerCAmelCase = torch.randn(1 , 27 , 30522)
lowerCAmelCase = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
| 272 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput( BaseOutput ):
    '''simple docstring'''
    latents: torch.FloatTensor
class VQModel( ModelMixin , ConfigMixin ):
'''simple docstring'''
@register_to_config
    def __init__( self , in_channels = 3 , out_channels = 3 , down_block_types = ("DownEncoderBlock2D",) , up_block_types = ("UpDecoderBlock2D",) , block_out_channels = (6_4,) , layers_per_block = 1 , act_fn = "silu" , latent_channels = 3 , sample_size = 3_2 , num_vq_embeddings = 2_5_6 , norm_num_groups = 3_2 , vq_embed_dim = None , scaling_factor = 0.1_82_15 , norm_type = "group" , ) -> Any:
        '''simple docstring'''
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=False , )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels , vq_embed_dim , 1 )
        self.quantize = VectorQuantizer(num_vq_embeddings , vq_embed_dim , beta=0.25 , remap=None , sane_index_shape=False )
        self.post_quant_conv = nn.Conv2d(vq_embed_dim , latent_channels , 1 )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , norm_type=norm_type , )
@apply_forward_hook
    def encode( self , x , return_dict = True ) -> VQEncoderOutput:
        '''simple docstring'''
        h = self.encoder(x )
        h = self.quant_conv(h )
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h )
@apply_forward_hook
    def decode( self , h , force_not_quantize = False , return_dict = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        '''simple docstring'''
        if not force_not_quantize:
            quant , emb_loss , info = self.quantize(h )
        else:
            quant = h
        quant2 = self.post_quant_conv(quant )
        dec = self.decoder(quant2 , quant if self.config.norm_type == 'spatial' else None )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward( self , sample , return_dict = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        '''simple docstring'''
        x = sample
        h = self.encode(x ).latents
        dec = self.decode(h ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
| 14 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
lowerCamelCase_ = """pytorch_model.bin"""
lowerCamelCase_ = """pytorch_model.bin.index.json"""
lowerCamelCase_ = """adapter_config.json"""
lowerCamelCase_ = """adapter_model.bin"""
lowerCamelCase_ = """adapter_model.safetensors"""
lowerCamelCase_ = """tf_model.h5"""
lowerCamelCase_ = """tf_model.h5.index.json"""
lowerCamelCase_ = """model.ckpt"""
lowerCamelCase_ = """flax_model.msgpack"""
lowerCamelCase_ = """flax_model.msgpack.index.json"""
lowerCamelCase_ = """model.safetensors"""
lowerCamelCase_ = """model.safetensors.index.json"""
lowerCamelCase_ = """config.json"""
lowerCamelCase_ = """preprocessor_config.json"""
lowerCamelCase_ = FEATURE_EXTRACTOR_NAME
lowerCamelCase_ = """generation_config.json"""
lowerCamelCase_ = """modelcard.json"""
lowerCamelCase_ = """▁"""
lowerCamelCase_ = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
lowerCamelCase_ = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
lowerCamelCase_ = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
lowerCamelCase_ = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version( min_version ) -> None:
    if version.parse(__version__ ) < version.parse(min_version ):
        if "dev" in min_version:
            error_message = (
                'This example requires a source install from HuggingFace Transformers (see '
                '`https://huggingface.co/docs/transformers/installation#install-from-source`),'
            )
        else:
            error_message = F'''This example requires a minimum version of {min_version},'''
error_message += F''' but the version found is {__version__}.\n'''
raise ImportError(
error_message
+ 'Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '
'versions of HuggingFace Transformers.' )
| 14 | 1 |
from math import sqrt
def sum_of_divisors( n ) -> int:
    total = 0
    for i in range(1, int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n
def solution( n = 1_0000 ) -> int:
    total = sum(
        i
        for i in range(1, n )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
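# Worked example (added): sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220,
# so solution() counts both members of the classic amicable pair 220/284.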
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 157 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_AUTO_MODULE = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(R'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(R'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping( fname : str , overwrite : bool = False ) -> Optional[Any]:
    with open(fname , "r" , encoding="utf-8" ) as f:
        content = f.read()
    lines = content.split("\n" )
    new_lines = []
    line_idx = 0
while line_idx < len(lowercase ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
            indent = len(re.search(r"^(\s*)\S" , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
            blocks = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
                    start_idx = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
            blocks = sorted(blocks , key=lambda x : _re_identifier.search(x ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(lowercase , "w" , encoding="utf-8" ) as f:
f.write("\n".join(lowercase ) )
elif "\n".join(lowercase ) != content:
return True
def sort_all_auto_mappings( overwrite : bool = False ) -> None:
    fnames = [os.path.join(PATH_TO_AUTO_MODULE , f ) for f in os.listdir(PATH_TO_AUTO_MODULE ) if f.endswith(".py" )]
    diffs = [sort_auto_mapping(fname , overwrite=overwrite ) for fname in fnames]
    if not overwrite and any(diffs ):
        failures = [f for f, d in zip(fnames , diffs ) if d]
        raise ValueError(
            F'The following files have auto mappings that need sorting: {", ".join(failures )}. Run `make style` to fix'
            " this." )
if __name__ == "__main__":
lowerCAmelCase_ : Any = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
lowerCAmelCase_ : Optional[int] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 63 | 0 |
import math
import random
def sigmoid_function( value :float , deriv :bool = False) -> float:
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
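# Added note: the `deriv` branch assumes `value` is already a sigmoid *output*, since
# d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)); that is how it is invoked below.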
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation( expected :int , number_propagations :int) -> float:
    # Random initial weight
    weight = float(2 * (random.randint(1 , 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_a = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_a
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_a , True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_a * 100
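# Worked intuition (added): each pass nudges `weight` along the error gradient, so
# forward_propagation(40, 100000) should settle near 40, up to noise from the random
# initial weight.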
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input('Expected value: '))
    number_propagations = int(input('Number of propagations: '))
print(forward_propagation(expected, number_propagations))
| 81 |
import cva
import numpy as np
class HarrisCorner :
    """simple docstring"""
    def __init__( self , k , window_size ) -> Any:
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self ) -> str:
return str(self.k )
    def detect( self , img_path ) -> tuple[cva.Mat, list[list[int]]]:
        img = cva.imread(img_path , 0 )
        h , w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img , cva.COLOR_GRAY2RGB )
        dy , dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_55 )
return color_img, corner_list
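        # Added note: r = det(M) - k * trace(M)^2 is the Harris response of the local
        # structure tensor M = [[wxx, wxy], [wxy, wyy]]; windows whose response exceeds
        # the 0.5 threshold above are recorded in corner_list and marked on color_img.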
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img , corner_list = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img)
| 81 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        model = TFXLMRobertaModel.from_pretrained('''jplu/tf-xlm-roberta-base''' )
        features = {
            '''input_ids''': tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]] , dtype=tf.int32 ), # "My dog is cute"
            '''attention_mask''': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.int32 ),
        }
        output = model(features )['''last_hidden_state''']
        expected_shape = tf.TensorShape((1, 6, 768) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
[
[
[0.0681762, 0.10894451, 0.06772504],
[-0.06423668, 0.02366615, 0.04329344],
[-0.06057295, 0.09974135, -0.00070584],
]
] , dtype=tf.floataa , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 97 |
'''simple docstring'''
from collections import defaultdict
class AssignmentUsingBitmask :
    """simple docstring"""
    def __init__( self , task_performed , total ):
        '''simple docstring'''
        self.total_tasks = total # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1 )] for j in range(2 ** len(task_performed ) )
        ]
        self.task = defaultdict(list ) # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed )) - 1
    def count_ways_until( self , mask , task_no ):
'''simple docstring'''
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
        # Number of ways when we don't assign this task in the arrangement
        total_ways_util = self.count_ways_until(mask , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
        self.dp[mask][task_no] = total_ways_util
return self.dp[mask][task_no]
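        # Added note: memoizing on (mask, task_no) bounds the search at O(2^M * N * M)
        # time for M persons and N tasks, since each state above is solved only once.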
    def count_no_of_ways( self , task_performed ):
        '''simple docstring'''
        for i in range(len(task_performed ) ):
            for j in task_performed[i]:
                self.task[j].append(i )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
__snake_case = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
__snake_case = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
    )
| 97 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True )
class AudioClassification( TaskTemplate ):
    task: str = field(default='''audio-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema: ClassVar[Features] = Features({'''audio''': Audio()} )
    label_schema: ClassVar[Features] = Features({'''labels''': ClassLabel} )
    audio_column: str = "audio"
    label_column: str = "labels"
    def align_with_features( self , features ) -> Tuple:
        """simple docstring"""
        if self.label_column not in features:
            raise ValueError(f'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema['''labels'''] = features[self.label_column]
        task_template.__dict__['''label_schema'''] = label_schema
return task_template
@property
def lowercase ( self: Tuple ) -> Dict[str, str]:
"""simple docstring"""
return {
self.audio_column: "audio",
self.label_column: "labels",
}
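
# Hedged usage sketch for the task template above (feature values assumed):
# aligning with a dataset's features swaps the generic ClassLabel for the
# dataset's own, so label ids resolve to real class names.
#
# from datasets import Audio, ClassLabel, Features
# features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
# template = AudioClassification().align_with_features(features)
# assert template.label_schema["labels"].names == ["cat", "dog"]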
| 365 |
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Find the unique prime factors of an integer."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoize the unique_prime_factors() length for a given value."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check equality of all elements in an iterable."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first group of n consecutive integers with n distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    results = run(n)
    return results[0] if len(results) else None
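
# Assumed sanity checks for the solution above (values from the well-known
# Project Euler #47 statement, quoted here as a usage sketch):
# assert solution(2) == 14       # 14 = 2 * 7 and 15 = 3 * 5
# assert solution(3) == 644      # 644, 645, 646 each have three distinct prime factors
# assert solution() == 134043    # first of four such consecutive integers
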
if __name__ == "__main__":
print(solution())
| 328 | 0 |
"""simple docstring"""
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")
class __lowerCAmelCase ( unittest.TestCase ):
lowercase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = 0
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase = WavaVecaConfig()
__UpperCamelCase = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
# save in new folder
model_config.save_pretrained(__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
__UpperCamelCase = AutoProcessor.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(__UpperCAmelCase , os.path.join(__UpperCAmelCase , __UpperCAmelCase ) )
copyfile(__UpperCAmelCase , os.path.join(__UpperCAmelCase , 'vocab.json' ) )
__UpperCamelCase = AutoProcessor.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase = WavaVecaFeatureExtractor()
__UpperCamelCase = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
__UpperCamelCase = WavaVecaProcessor(__UpperCAmelCase , __UpperCAmelCase )
# save in new folder
processor.save_pretrained(__UpperCAmelCase )
# drop `processor_class` in tokenizer
with open(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , 'r' ) as f:
__UpperCamelCase = json.load(__UpperCAmelCase )
config_dict.pop('processor_class' )
with open(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , 'w' ) as f:
f.write(json.dumps(__UpperCAmelCase ) )
__UpperCamelCase = AutoProcessor.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase = WavaVecaFeatureExtractor()
__UpperCamelCase = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
__UpperCamelCase = WavaVecaProcessor(__UpperCAmelCase , __UpperCAmelCase )
# save in new folder
processor.save_pretrained(__UpperCAmelCase )
# drop `processor_class` in feature extractor
with open(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , 'r' ) as f:
__UpperCamelCase = json.load(__UpperCAmelCase )
config_dict.pop('processor_class' )
with open(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , 'w' ) as f:
f.write(json.dumps(__UpperCAmelCase ) )
__UpperCamelCase = AutoProcessor.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase = WavaVecaConfig(processor_class='Wav2Vec2Processor' )
model_config.save_pretrained(__UpperCAmelCase )
# copy relevant files
copyfile(__UpperCAmelCase , os.path.join(__UpperCAmelCase , 'vocab.json' ) )
# create emtpy sample processor
with open(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , 'w' ) as f:
f.write('{}' )
__UpperCamelCase = AutoProcessor.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises(__UpperCAmelCase ):
__UpperCamelCase = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__UpperCAmelCase ):
__UpperCamelCase = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=__UpperCAmelCase )
__UpperCamelCase = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' , trust_remote_code=__UpperCAmelCase )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
__UpperCamelCase = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
__UpperCamelCase = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
__UpperCamelCase = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=__UpperCAmelCase , use_fast=__UpperCAmelCase )
__UpperCamelCase = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , 'NewTokenizer' )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
def UpperCAmelCase ( self ):
'''simple docstring'''
try:
AutoConfig.register('custom' , __UpperCAmelCase )
AutoFeatureExtractor.register(__UpperCAmelCase , __UpperCAmelCase )
AutoTokenizer.register(__UpperCAmelCase , slow_tokenizer_class=__UpperCAmelCase )
AutoProcessor.register(__UpperCAmelCase , __UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__UpperCAmelCase ):
AutoProcessor.register(__UpperCAmelCase , __UpperCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
__UpperCamelCase = CustomFeatureExtractor.from_pretrained(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase = os.path.join(__UpperCAmelCase , 'vocab.txt' )
with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
__UpperCamelCase = CustomTokenizer(__UpperCAmelCase )
__UpperCamelCase = CustomProcessor(__UpperCAmelCase , __UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(__UpperCAmelCase )
__UpperCamelCase = AutoProcessor.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase ( self ):
'''simple docstring'''
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = False
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = False
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = "AutoFeatureExtractor"
lowercase = "AutoTokenizer"
lowercase = False
try:
AutoConfig.register('custom' , __UpperCAmelCase )
AutoFeatureExtractor.register(__UpperCAmelCase , __UpperCAmelCase )
AutoTokenizer.register(__UpperCAmelCase , slow_tokenizer_class=__UpperCAmelCase )
AutoProcessor.register(__UpperCAmelCase , __UpperCAmelCase )
# If remote code is not set, the default is to use local classes.
__UpperCamelCase = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
__UpperCamelCase = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=__UpperCAmelCase )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
__UpperCamelCase = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=__UpperCAmelCase )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(processor.__class__.__name__ , 'BertTokenizerFast' )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-convnext' )
self.assertEqual(processor.__class__.__name__ , 'ConvNextImageProcessor' )
@is_staging_test
class __lowerCAmelCase ( unittest.TestCase ):
lowercase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def UpperCAmelCase ( cls ):
'''simple docstring'''
__UpperCamelCase = TOKEN
HfFolder.save_token(__UpperCAmelCase )
@classmethod
def UpperCAmelCase ( cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-processor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-processor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-processor' )
except HTTPError:
pass
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = WavaVecaProcessor.from_pretrained(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__UpperCAmelCase , 'test-processor' ) , push_to_hub=__UpperCAmelCase , use_auth_token=self._token )
__UpperCamelCase = WavaVecaProcessor.from_pretrained(F'{USER}/test-processor' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__UpperCAmelCase , getattr(new_processor.feature_extractor , __UpperCAmelCase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = WavaVecaProcessor.from_pretrained(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__UpperCAmelCase , 'test-processor-org' ) , push_to_hub=__UpperCAmelCase , use_auth_token=self._token , organization='valid_org' , )
__UpperCamelCase = WavaVecaProcessor.from_pretrained('valid_org/test-processor-org' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__UpperCAmelCase , getattr(new_processor.feature_extractor , __UpperCAmelCase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def UpperCAmelCase ( self ):
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
__UpperCamelCase = CustomFeatureExtractor.from_pretrained(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase = os.path.join(__UpperCAmelCase , 'vocab.txt' )
with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
__UpperCamelCase = CustomTokenizer(__UpperCAmelCase )
__UpperCamelCase = CustomProcessor(__UpperCAmelCase , __UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F'{USER}/test-dynamic-processor' , token=self._token )
__UpperCamelCase = Repository(__UpperCAmelCase , clone_from=F'{USER}/test-dynamic-processor' , token=self._token )
processor.save_pretrained(__UpperCAmelCase )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor',
'AutoProcessor': 'custom_processing.CustomProcessor',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(__UpperCAmelCase , 'tokenizer_config.json' ) ) as f:
__UpperCamelCase = json.load(__UpperCAmelCase )
self.assertDictEqual(
tokenizer_config['auto_map'] , {
'AutoTokenizer': ['custom_tokenization.CustomTokenizer', None],
'AutoProcessor': 'custom_processing.CustomProcessor',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(__UpperCAmelCase , 'custom_feature_extraction.py' ) ) )
self.assertTrue(os.path.isfile(os.path.join(__UpperCAmelCase , 'custom_tokenization.py' ) ) )
self.assertTrue(os.path.isfile(os.path.join(__UpperCAmelCase , 'custom_processing.py' ) ) )
repo.push_to_hub()
__UpperCamelCase = AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' , trust_remote_code=__UpperCAmelCase )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , 'CustomProcessor' )
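
# Hedged sketch of the registration pattern the tests above exercise (class
# names assumed to follow the custom_* fixture modules):
#
# from transformers import AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer
# AutoConfig.register("custom", CustomConfig)
# AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
# AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
# AutoProcessor.register(CustomConfig, CustomProcessor)
# processor = AutoProcessor.from_pretrained(saved_dir)  # now resolves to CustomProcessor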
| 316 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase : Any = logging.get_logger(__name__)
UpperCamelCase : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : Dict = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
UpperCamelCase : Dict = {
"gpt2": 1_0_2_4,
"gpt2-medium": 1_0_2_4,
"gpt2-large": 1_0_2_4,
"gpt2-xl": 1_0_2_4,
"distilgpt2": 1_0_2_4,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ["input_ids", "attention_mask"]
lowercase = GPTaTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase=False , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
__UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , )
__UpperCamelCase = kwargs.pop('add_bos_token' , __UpperCAmelCase )
__UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , __UpperCAmelCase ) != add_prefix_space:
__UpperCamelCase = getattr(__UpperCAmelCase , pre_tok_state.pop('type' ) )
__UpperCamelCase = add_prefix_space
__UpperCamelCase = pre_tok_class(**__UpperCAmelCase )
__UpperCamelCase = add_prefix_space
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__UpperCamelCase = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [self.eos_token_id] )
if len(__UpperCAmelCase ) > self.model_max_length:
__UpperCamelCase = input_ids[-self.model_max_length :]
return input_ids
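
# Hedged usage sketch (the class above mirrors transformers' GPT2TokenizerFast;
# the checkpoint name is an assumption for illustration):
#
# from transformers import GPT2TokenizerFast
# tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
# enc = tok(["Hello", "world"], is_split_into_words=True)  # needs add_prefix_space=True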
| 316 | 1 |
"""simple docstring"""
def lowerCamelCase (a_ :list[int] , a_ :list[int]) -> tuple[float, float]:
# Check if the input is valid
if not len(a_) == len(a_) == 3:
raise ValueError('''Please enter a valid equation.''')
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError('''Both a & b of two equations can\'t be zero.''')
# Extract the coefficients
lowercase , lowercase , lowercase :List[Any] = equationa
lowercase , lowercase , lowercase :Any = equationa
# Calculate the determinants of the matrices
lowercase :Any = aa * ba - aa * ba
lowercase :str = ca * ba - ca * ba
lowercase :Dict = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('''Infinite solutions. (Consistent system)''')
else:
raise ValueError('''No solution. (Inconsistent system)''')
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
lowercase :int = determinant_x / determinant
lowercase :Dict = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
| 172 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase = logging.get_logger(__name__)
def lowerCamelCase (a_ :str) -> YolosConfig:
lowercase :Union[str, Any] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowercase :List[str] = 192
lowercase :List[str] = 768
lowercase :int = 12
lowercase :str = 3
lowercase :List[Any] = [800, 1333]
lowercase :Any = False
elif yolos_name == "yolos_s_dWr":
lowercase :List[str] = 330
lowercase :List[Any] = 14
lowercase :int = 6
lowercase :List[Any] = 1320
elif "yolos_s" in yolos_name:
lowercase :int = 384
lowercase :Union[str, Any] = 1536
lowercase :int = 12
lowercase :str = 6
elif "yolos_b" in yolos_name:
lowercase :Dict = [800, 1344]
lowercase :List[str] = 91
lowercase :List[Any] = '''huggingface/label-files'''
lowercase :Union[str, Any] = '''coco-detection-id2label.json'''
lowercase :int = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r'''))
lowercase :List[Any] = {int(a_): v for k, v in idalabel.items()}
lowercase :Dict = idalabel
lowercase :Tuple = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase (a_ :dict , a_ :YolosConfig , a_ :bool = False) -> Optional[int]:
for i in range(config.num_hidden_layers):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase :Dict = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""")
lowercase :List[Any] = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""")
# next, add query, keys and values (in that order) to the state dict
lowercase :int = in_proj_weight[: config.hidden_size, :]
lowercase :List[str] = in_proj_bias[: config.hidden_size]
lowercase :Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase :int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase :Any = in_proj_weight[-config.hidden_size :, :]
lowercase :Union[str, Any] = in_proj_bias[-config.hidden_size :]
def lowerCamelCase (a_ :str) -> str:
if "backbone" in name:
lowercase :Optional[int] = name.replace('''backbone''' , '''vit''')
if "cls_token" in name:
lowercase :List[Any] = name.replace('''cls_token''' , '''embeddings.cls_token''')
if "det_token" in name:
lowercase :int = name.replace('''det_token''' , '''embeddings.detection_tokens''')
if "mid_pos_embed" in name:
lowercase :List[Any] = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''')
if "pos_embed" in name:
lowercase :List[str] = name.replace('''pos_embed''' , '''embeddings.position_embeddings''')
if "patch_embed.proj" in name:
lowercase :Any = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''')
if "blocks" in name:
lowercase :Any = name.replace('''blocks''' , '''encoder.layer''')
if "attn.proj" in name:
lowercase :Dict = name.replace('''attn.proj''' , '''attention.output.dense''')
if "attn" in name:
lowercase :Tuple = name.replace('''attn''' , '''attention.self''')
if "norm1" in name:
lowercase :List[Any] = name.replace('''norm1''' , '''layernorm_before''')
if "norm2" in name:
lowercase :List[Any] = name.replace('''norm2''' , '''layernorm_after''')
if "mlp.fc1" in name:
lowercase :Union[str, Any] = name.replace('''mlp.fc1''' , '''intermediate.dense''')
if "mlp.fc2" in name:
lowercase :Dict = name.replace('''mlp.fc2''' , '''output.dense''')
if "class_embed" in name:
lowercase :Dict = name.replace('''class_embed''' , '''class_labels_classifier''')
if "bbox_embed" in name:
lowercase :Dict = name.replace('''bbox_embed''' , '''bbox_predictor''')
if "vit.norm" in name:
lowercase :Dict = name.replace('''vit.norm''' , '''vit.layernorm''')
return name
def lowerCamelCase (a_ :dict , a_ :YolosForObjectDetection) -> dict:
for key in orig_state_dict.copy().keys():
lowercase :List[Any] = orig_state_dict.pop(a_)
if "qkv" in key:
lowercase :str = key.split('''.''')
lowercase :List[str] = int(key_split[2])
lowercase :List[str] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowercase :List[Any] = val[:dim, :]
lowercase :Optional[int] = val[
dim : dim * 2, :
]
lowercase :Any = val[-dim:, :]
else:
lowercase :List[str] = val[:dim]
lowercase :Union[str, Any] = val[dim : dim * 2]
lowercase :List[Any] = val[-dim:]
else:
lowercase :List[str] = val
return orig_state_dict
def lowerCamelCase () -> torch.Tensor:
lowercase :Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase :Dict = Image.open(requests.get(a_ , stream=a_).raw)
return im
@torch.no_grad()
def lowerCamelCase (a_ :str , a_ :str , a_ :str , a_ :bool = False) -> List[Any]:
lowercase :Union[str, Any] = get_yolos_config(a_)
# load original state_dict
lowercase :List[str] = torch.load(a_ , map_location='''cpu''')['''model''']
# load 🤗 model
lowercase :Tuple = YolosForObjectDetection(a_)
model.eval()
lowercase :Dict = convert_state_dict(a_ , a_)
model.load_state_dict(a_)
# Check outputs on an image, prepared by YolosImageProcessor
lowercase :Tuple = 800 if yolos_name != '''yolos_ti''' else 512
lowercase :Dict = YolosImageProcessor(format='''coco_detection''' , size=a_)
lowercase :Optional[int] = image_processor(images=prepare_img() , return_tensors='''pt''')
lowercase :List[Any] = model(**a_)
lowercase , lowercase :Dict = outputs.logits, outputs.pred_boxes
lowercase , lowercase :int = None, None
if yolos_name == "yolos_ti":
lowercase :Dict = torch.tensor(
[[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]])
lowercase :Dict = torch.tensor(
[[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]])
elif yolos_name == "yolos_s_200_pre":
lowercase :Union[str, Any] = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]])
lowercase :List[str] = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]])
elif yolos_name == "yolos_s_300_pre":
lowercase :int = torch.tensor(
[[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]])
lowercase :Optional[Any] = torch.tensor(
[[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]])
elif yolos_name == "yolos_s_dWr":
lowercase :int = torch.tensor(
[[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]])
lowercase :Dict = torch.tensor(
[[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]])
elif yolos_name == "yolos_base":
lowercase :Dict = torch.tensor(
[[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]])
lowercase :Tuple = torch.tensor(
[[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]])
else:
raise ValueError(F"""Unknown yolos_name: {yolos_name}""")
assert torch.allclose(logits[0, :3, :3] , a_ , atol=1E-4)
assert torch.allclose(pred_boxes[0, :3, :3] , a_ , atol=1E-4)
Path(a_).mkdir(exist_ok=a_)
print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""")
model.save_pretrained(a_)
print(F"""Saving image processor to {pytorch_dump_folder_path}""")
image_processor.save_pretrained(a_)
if push_to_hub:
lowercase :Optional[int] = {
'''yolos_ti''': '''yolos-tiny''',
'''yolos_s_200_pre''': '''yolos-small''',
'''yolos_s_300_pre''': '''yolos-small-300''',
'''yolos_s_dWr''': '''yolos-small-dwr''',
'''yolos_base''': '''yolos-base''',
}
print('''Pushing to the hub...''')
lowercase :Optional[Any] = model_mapping[yolos_name]
image_processor.push_to_hub(a_ , organization='''hustvl''')
model.push_to_hub(a_ , organization='''hustvl''')
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
UpperCAmelCase = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
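
# Example invocation for the conversion script above (the script filename and
# the paths are assumptions for illustration):
#
# python convert_yolos_to_pytorch.py \
#     --yolos_name yolos_s_200_pre \
#     --checkpoint_path /path/to/yolos_s_200_pre.pth \
#     --pytorch_dump_folder_path ./yolos-small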
| 172 | 1 |
"""simple docstring"""
from manim import *
class A_ ( A__ ):
"""simple docstring"""
def UpperCAmelCase__ ( self :Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ : Tuple =Rectangle(height=0.5 , width=0.5 )
lowerCamelCase__ : Dict =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCamelCase__ : Dict =[mem.copy() for i in range(6 )]
lowerCamelCase__ : Optional[Any] =[mem.copy() for i in range(6 )]
lowerCamelCase__ : int =VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
lowerCamelCase__ : str =VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
lowerCamelCase__ : Any =VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
lowerCamelCase__ : List[str] =Text('CPU' , font_size=24 )
lowerCamelCase__ : int =Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase_ )
lowerCamelCase__ : List[str] =[mem.copy() for i in range(4 )]
lowerCamelCase__ : List[str] =VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
lowerCamelCase__ : Dict =Text('GPU' , font_size=24 )
lowerCamelCase__ : str =Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase_ )
lowerCamelCase__ : str =[mem.copy() for i in range(6 )]
lowerCamelCase__ : Any =VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
lowerCamelCase__ : Dict =Text('Model' , font_size=24 )
lowerCamelCase__ : Union[str, Any] =Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase_ )
lowerCamelCase__ : List[Any] =[]
for i, rect in enumerate(lowerCamelCase_ ):
rect.set_stroke(lowerCamelCase_ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
lowerCamelCase__ : Tuple =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCamelCase_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowerCamelCase_ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowerCamelCase_ , buff=0.0 )
self.add(lowerCamelCase_ )
cpu_targs.append(lowerCamelCase_ )
lowerCamelCase__ : int =[mem.copy() for i in range(6 )]
lowerCamelCase__ : Optional[int] =VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
lowerCamelCase__ : Dict =Text('Loaded Checkpoint' , font_size=24 )
lowerCamelCase__ : int =Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , aligned_edge=lowerCamelCase_ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
lowerCamelCase__ : Any =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase__ : List[str] =MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase_ , lowerCamelCase_ )
lowerCamelCase__ : List[Any] =MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowerCamelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
lowerCamelCase__ : Any =MarkupText(
f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ ) , Write(lowerCamelCase_ ) )
self.play(Write(lowerCamelCase_ , run_time=1 ) , Create(lowerCamelCase_ , run_time=1 ) )
lowerCamelCase__ : str =[]
lowerCamelCase__ : List[Any] =[]
for i, rect in enumerate(lowerCamelCase_ ):
lowerCamelCase__ : Optional[int] =fill.copy().set_fill(lowerCamelCase_ , opacity=0.7 )
target.move_to(lowerCamelCase_ )
first_animations.append(GrowFromCenter(lowerCamelCase_ , run_time=1 ) )
lowerCamelCase__ : str =target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowerCamelCase_ , run_time=1.5 ) )
self.play(*lowerCamelCase_ )
self.play(*lowerCamelCase_ )
self.wait() | 126 |
"""simple docstring"""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
lowerCAmelCase = TypeVar("""T""")
class A_ ( Generic[T] ):
"""simple docstring"""
def __init__( self :Dict , lowerCamelCase_ :bool = True ):
"""simple docstring"""
lowerCamelCase__ : dict[T, list[T]] ={} # dictionary of lists
lowerCamelCase__ : int =directed
def UpperCAmelCase__ ( self :str , lowerCamelCase_ :T , lowerCamelCase_ :T ):
"""simple docstring"""
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCamelCase_ )
self.adj_list[destination_vertex].append(lowerCamelCase_ )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCamelCase_ )
lowerCamelCase__ : Dict =[source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(lowerCamelCase_ )
lowerCamelCase__ : Dict =[destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
lowerCamelCase__ : Union[str, Any] =[destination_vertex]
lowerCamelCase__ : Any =[source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCamelCase_ )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] =[]
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
lowerCamelCase__ : Tuple =[destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
lowerCamelCase__ : str =[destination_vertex]
lowerCamelCase__ : Optional[Any] =[]
return self
def __repr__( self :Optional[Any] ):
"""simple docstring"""
return pformat(self.adj_list ) | 126 | 1 |
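# Assumed usage sketch for the adjacency-list graph above:
#
# g = GraphAdjacencyList[int](directed=False)
# g.add_edge(0, 1).add_edge(1, 2)   # add_edge returns self, so calls chain
# print(g)                          # {0: [1], 1: [0, 2], 2: [1]}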
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph with the given edge probability."""
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete graph with vertices_number vertices."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
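
# Assumed quick check for the generators above: a seeded run is reproducible.
#
# random.seed(1)
# print(random_graph(4, 0.5))  # e.g. {0: [1], 1: [0, 2, 3], 2: [1, 3], 3: [1, 2]}
# print(complete_graph(3))     # {0: [1, 2], 1: [0, 2], 2: [0, 1]}
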
if __name__ == "__main__":
import doctest
doctest.testmod()
| 371 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
def lowerCamelCase_ ( _a , _a=False ):
"""simple docstring"""
lowerCAmelCase__ : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase__ : Tuple = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def lowerCamelCase_ ( _a , _a , _a=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
lowerCAmelCase__ : Dict = ''''''
else:
lowerCAmelCase__ : List[str] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase__ : Dict = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
lowerCAmelCase__ : Dict = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase__ : Any = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase__ : Optional[Any] = in_proj_bias[: config.hidden_size]
lowerCAmelCase__ : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase__ : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase__ : Dict = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase__ : Any = in_proj_bias[-config.hidden_size :]
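# Tiny numeric sketch (shapes assumed) of the split performed above: timm stores
# one stacked (3 * hidden, hidden) in_proj matrix; HF ViT expects separate
# query / key / value matrices, recovered by row slicing.
#
# import torch
# hidden = 4
# in_proj_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
# q = in_proj_weight[:hidden, :]
# k = in_proj_weight[hidden : 2 * hidden, :]
# v = in_proj_weight[-hidden:, :]
# assert torch.equal(torch.cat([q, k, v]), in_proj_weight)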
def lowerCamelCase_ ( _a ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(_a , _a )
def lowerCamelCase_ ( _a , _a , _a ):
"""simple docstring"""
lowerCAmelCase__ : Any = dct.pop(_a )
lowerCAmelCase__ : Optional[Any] = val
def lowerCamelCase_ ( ):
"""simple docstring"""
lowerCAmelCase__ : Dict = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCAmelCase__ : Tuple = Image.open(requests.get(_a , stream=_a ).raw )
return im
@torch.no_grad()
def lowerCamelCase_ ( _a , _a ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = ViTConfig()
lowerCAmelCase__ : Optional[Any] = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
lowerCAmelCase__ : List[Any] = True
lowerCAmelCase__ : Any = int(vit_name[-12:-10] )
lowerCAmelCase__ : int = int(vit_name[-9:-6] )
else:
lowerCAmelCase__ : Dict = 1_000
lowerCAmelCase__ : str = '''huggingface/label-files'''
lowerCAmelCase__ : Dict = '''imagenet-1k-id2label.json'''
lowerCAmelCase__ : str = json.load(open(hf_hub_download(_a , _a , repo_type='''dataset''' ) , '''r''' ) )
lowerCAmelCase__ : Any = {int(_a ): v for k, v in idalabel.items()}
lowerCAmelCase__ : Optional[Any] = idalabel
lowerCAmelCase__ : List[str] = {v: k for k, v in idalabel.items()}
lowerCAmelCase__ : Tuple = int(vit_name[-6:-4] )
lowerCAmelCase__ : Union[str, Any] = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('''tiny''' ):
lowerCAmelCase__ : List[str] = 192
lowerCAmelCase__ : Tuple = 768
lowerCAmelCase__ : Optional[int] = 12
lowerCAmelCase__ : List[Any] = 3
elif vit_name[9:].startswith('''small''' ):
lowerCAmelCase__ : Any = 384
lowerCAmelCase__ : Optional[int] = 1_536
lowerCAmelCase__ : List[Any] = 12
lowerCAmelCase__ : Tuple = 6
else:
pass
else:
if vit_name[4:].startswith('''small''' ):
lowerCAmelCase__ : List[str] = 768
lowerCAmelCase__ : Tuple = 2_304
lowerCAmelCase__ : Any = 8
lowerCAmelCase__ : Union[str, Any] = 8
elif vit_name[4:].startswith('''base''' ):
pass
elif vit_name[4:].startswith('''large''' ):
lowerCAmelCase__ : str = 1_024
lowerCAmelCase__ : Optional[Any] = 4_096
lowerCAmelCase__ : Optional[int] = 24
lowerCAmelCase__ : Tuple = 16
elif vit_name[4:].startswith('''huge''' ):
lowerCAmelCase__ : Tuple = 1_280
lowerCAmelCase__ : Tuple = 5_120
lowerCAmelCase__ : Optional[int] = 32
lowerCAmelCase__ : List[Any] = 16
# load original model from timm
lowerCAmelCase__ : Tuple = timm.create_model(_a , pretrained=_a )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCAmelCase__ : Any = timm_model.state_dict()
if base_model:
remove_classification_head_(_a )
lowerCAmelCase__ : List[Any] = create_rename_keys(_a , _a )
for src, dest in rename_keys:
rename_key(_a , _a , _a )
read_in_q_k_v(_a , _a , _a )
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCAmelCase__ : List[str] = ViTModel(_a ).eval()
else:
lowerCAmelCase__ : Any = ViTForImageClassification(_a ).eval()
model.load_state_dict(_a )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
lowerCAmelCase__ : Dict = DeiTImageProcessor(size=config.image_size )
else:
lowerCAmelCase__ : int = ViTImageProcessor(size=config.image_size )
lowerCAmelCase__ : Optional[Any] = image_processor(images=prepare_img() , return_tensors='''pt''' )
lowerCAmelCase__ : List[str] = encoding['''pixel_values''']
lowerCAmelCase__ : List[Any] = model(_a )
if base_model:
lowerCAmelCase__ : Optional[Any] = timm_model.forward_features(_a )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_a , outputs.pooler_output , atol=1e-3 )
else:
lowerCAmelCase__ : Union[str, Any] = timm_model(_a )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_a , outputs.logits , atol=1e-3 )
Path(_a ).mkdir(exist_ok=_a )
print(f'Saving model {vit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_a )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_a )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_patch16_224''',
type=str,
help='''Name of the ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowerCamelCase = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 211 | 0 |
import re


def indian_phone_validator(phone: str) -> bool:
    """Determine whether the string is a valid Indian phone number."""
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
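
# Assumed behavior checks for the validator above:
# indian_phone_validator("+918827897895")  # True
# indian_phone_validator("9876543210")     # True (bare 10-digit mobile number)
# indian_phone_validator("123456789")      # False (wrong prefix and length)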
| 342 |
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
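    # Note (assumed usage): show_min only returns the value, so wrap it in
    # print to see it. With the edges above the shortest distances are:
    # print(graph.show_min(1, 4))  # 11  (1 -> 3 -> 4: 5 + 6)
    # print(graph.show_min(0, 3))  # 16  (0 -> 2 -> 3: 9 + 7)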
| 342 | 1 |
import os
import unittest

from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len) | 210 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel

from ...models.attention import BasicTransformerBlock
from ...utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states | 210 | 1 |
import inspect
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNetaDModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
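
# Hedged usage sketch for the unconditional pipeline above (the checkpoint id
# is an assumption for illustration):
#
# pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
# image = pipe(num_inference_steps=50).images[0]
# image.save("sample.png")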
| 31 |
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch( gpt2_checkpoint_path : List[str] , gpt2_config_file : Optional[Any] , pytorch_dump_folder_path : Tuple ):
    '''simple docstring'''
    # Construct the model config
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file )
    model = GPT2Model(config )
    # Load weights from numpy
    load_tf_weights_in_gpt2(model , config , gpt2_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + """/""" + CONFIG_NAME
    print(f'Save PyTorch model to {pytorch_weights_dump_path}' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(f'Save configuration file to {pytorch_config_dump_path}' )
    with open(pytorch_config_dump_path , """w""" , encoding="""utf-8""" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 252 | 0 |
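# Hedged invocation sketch for the conversion script above; the checkpoint path and
# output directory are placeholders, not paths from the original repository:
#
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /tmp/gpt2/model.ckpt \
#       --pytorch_dump_folder_path /tmp/gpt2-pytorch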
'''simple docstring'''
from __future__ import annotations
def maximum_non_adjacent_sum( nums: list[int] ) -> int:
    """Return the maximum sum of non-adjacent elements of ``nums``.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
if __name__ == "__main__":
import doctest
doctest.testmod() | 370 |
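# Worked example for maximum_non_adjacent_sum above (added for clarity): skipping
# adjacent elements, the best pick from [3, 2, 7, 10] is 3 + 10 = 13.
# assert maximum_non_adjacent_sum([3, 2, 7, 10]) == 13
# assert maximum_non_adjacent_sum([1, 2, 3, 1]) == 4  # 1 + 3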
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box( box , width , height ) -> Optional[Any]:
    return [
        int(1000 * (box[0] / width) ),
        int(1000 * (box[1] / height) ),
        int(1000 * (box[2] / width) ),
        int(1000 * (box[3] / height) ),
    ]
def apply_tesseract( image , lang , tesseract_config ) -> Union[str, Any]:
    # apply OCR to the (PIL-converted) image
    pil_image = to_pil_image(image )
    image_width , image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type='''dict''' , config=tesseract_config )
    words , left , top , width , height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class LayoutLMv3ImageProcessor( BaseImageProcessor ):
'''simple docstring'''
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_rescale = True , rescale_value = 1 / 2_55 , do_normalize = True , image_mean = None , image_std = None , apply_ocr = True , ocr_lang = None , tesseract_config = "" , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        output_size = (size['''height'''], size['''width'''])
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , resample=None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , apply_ocr = None , ocr_lang = None , tesseract_config = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self , '''pytesseract''' )
            words_batch = []
            boxes_batch = []
            for image in images:
                words , boxes = apply_tesseract(image , ocr_lang , tesseract_config )
                words_batch.append(words )
                boxes_batch.append(boxes )
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = BatchFeature(data={'''pixel_values''': images} , tensor_type=return_tensors )
        if apply_ocr:
            data['''words'''] = words_batch
            data['''boxes'''] = boxes_batch
        return data | 228 | 0 |
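# Hedged usage sketch for the image processor above; the snippet assumes
# `transformers`, `pytesseract` and the Tesseract binary are installed, and
# "invoice.png" is a placeholder file:
# from transformers import LayoutLMv3ImageProcessor
# from PIL import Image
#
# processor = LayoutLMv3ImageProcessor(apply_ocr=True)
# encoding = processor(Image.open("invoice.png").convert("RGB"), return_tensors="pt")
# print(encoding["pixel_values"].shape, encoding["words"], encoding["boxes"])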
def solution( limit: int = 100_0000 ) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1 , limit ):
        for n in range(first_term , limit , first_term ):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10 )
    return count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 50 |
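# Why the loop above works (derivation, added for clarity): write the arithmetic
# progression as x = a + d, y = a, z = a - d. Then
#   x**2 - y**2 - z**2 = (a + d)**2 - a**2 - (a - d)**2 = 4*a*d - a**2 = a * (4*d - a) = n,
# so every solution corresponds to a divisor pair a * m = n with m = 4*d - a, i.e.
# d = (a + m) / 4 must be a positive integer. In the code, first_term is a, n runs
# over the multiples of a, and common_difference = a + n / a = a + m before the
# division by 4.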
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotAudioClassificationPipeline( Pipeline ):
'''simple docstring'''
def __init__( self, **lowercase_ ) -> Dict:
"""simple docstring"""
super().__init__(**lowercase_ )
if self.framework != "pt":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
# No specific FOR_XXX available yet
    def __call__( self, audios, **kwargs ) -> Tuple:
        """simple docstring"""
        return super().__call__(audios, **kwargs )
    def _sanitize_parameters( self, **kwargs ) -> int:
        """simple docstring"""
        preprocess_params ={}
        if "candidate_labels" in kwargs:
            preprocess_params['''candidate_labels'''] =kwargs['''candidate_labels''']
        if "hypothesis_template" in kwargs:
            preprocess_params['''hypothesis_template'''] =kwargs['''hypothesis_template''']
        return preprocess_params, {}, {}
    def preprocess( self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}." ) -> Union[str, Any]:
        """simple docstring"""
        if isinstance(audio, str ):
            if audio.startswith('''http://''' ) or audio.startswith('''https://''' ):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio =requests.get(audio ).content
            else:
                with open(audio, '''rb''' ) as f:
                    audio =f.read()
        if isinstance(audio, bytes ):
            audio =ffmpeg_read(audio, self.feature_extractor.sampling_rate )
        if not isinstance(audio, np.ndarray ):
            raise ValueError('''We expect a numpy ndarray as input''' )
        if len(audio.shape ) != 1:
            raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' )
        inputs =self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors='''pt''' )
        inputs['''candidate_labels'''] =candidate_labels
        sequences =[hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs =self.tokenizer(sequences, return_tensors=self.framework, padding=True )
        inputs['''text_inputs'''] =[text_inputs]
        return inputs
    def _forward( self, model_inputs ) -> str:
        """simple docstring"""
        candidate_labels =model_inputs.pop('''candidate_labels''' )
        text_inputs =model_inputs.pop('''text_inputs''' )
        if isinstance(text_inputs[0], UserDict ):
            text_inputs =text_inputs[0]
        else:
            # Batching case.
            text_inputs =text_inputs[0][0]
        outputs =self.model(**text_inputs, **model_inputs )
        model_outputs ={
            '''candidate_labels''': candidate_labels,
            '''logits''': outputs.logits_per_audio,
        }
        return model_outputs
    def postprocess( self, model_outputs ) -> Any:
        """simple docstring"""
        candidate_labels =model_outputs.pop('''candidate_labels''' )
        logits =model_outputs['''logits'''][0]
        if self.framework == "pt":
            probs =logits.softmax(dim=0 )
            scores =probs.tolist()
        else:
            raise ValueError('''`tf` framework not supported.''' )
        result =[
            {'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels ), key=lambda x : -x[0] )
        ]
        return result
| 188 | 0 |
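# Hedged usage sketch for the pipeline above; the model id is illustrative and the
# snippet assumes `transformers`, `torch` and ffmpeg are available:
# from transformers import pipeline
#
# classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
# output = classifier(
#     "dog_bark.wav",  # placeholder local file
#     candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"],
# )
# print(output)  # list of {"score": ..., "label": ...} sorted by score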
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    """configuration_blip_2""": [
        """BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """Blip2Config""",
        """Blip2QFormerConfig""",
        """Blip2VisionConfig""",
    ],
    """processing_blip_2""": ["""Blip2Processor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_blip_2"""] = [
        """BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """Blip2Model""",
        """Blip2QFormerModel""",
        """Blip2PreTrainedModel""",
        """Blip2ForConditionalGeneration""",
        """Blip2VisionModel""",
    ]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 330 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class LanguageModeling( TaskTemplate ):
    task: str = field(default='language-modeling' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'text': Value('string' )} )
    label_schema: ClassVar[Features] = Features({} )
    text_column: str = "text"
    @property
    def column_mapping( self ) -> Dict[str, str]:
        '''simple docstring'''
        return {self.text_column: "text"}
| 330 | 1 |
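# Quick demonstration of the task template above (uses the class defined in this file):
# the column mapping renames whatever column holds the raw text to "text".
_template = LanguageModeling(text_column="content")
assert _template.column_mapping == {"content": "text"}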
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput( BaseOutput ):
    '''simple docstring'''
    latents: torch.FloatTensor
class VQModel( ModelMixin , ConfigMixin ):
    '''simple docstring'''
    @register_to_config
    def __init__( self , in_channels : int = 3 , out_channels : int = 3 , down_block_types : Tuple[str] = ("DownEncoderBlock2D",) , up_block_types : Tuple[str] = ("UpDecoderBlock2D",) , block_out_channels : Tuple[int] = (64,) , layers_per_block : int = 1 , act_fn : str = "silu" , latent_channels : int = 3 , sample_size : int = 32 , num_vq_embeddings : int = 256 , norm_num_groups : int = 32 , vq_embed_dim : Optional[int] = None , scaling_factor : float = 0.18215 , norm_type : str = "group" , ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=False , )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels , vq_embed_dim , 1)
        self.quantize = VectorQuantizer(num_vq_embeddings , vq_embed_dim , beta=0.25 , remap=None , sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim , latent_channels , 1)
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , norm_type=norm_type , )
    @apply_forward_hook
    def encode( self , x : torch.FloatTensor , return_dict : bool = True) ->VQEncoderOutput:
        '''simple docstring'''
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)
    @apply_forward_hook
    def decode( self , h : torch.FloatTensor , force_not_quantize : bool = False , return_dict : bool = True) ->Union[DecoderOutput, torch.FloatTensor]:
        '''simple docstring'''
        # also go through the quantization layer unless explicitly disabled
        if not force_not_quantize:
            quant , emb_loss , info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2 , quant if self.config.norm_type == '''spatial''' else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
    def forward( self , sample : torch.FloatTensor , return_dict : bool = True) ->Union[DecoderOutput, torch.FloatTensor]:
        '''simple docstring'''
        h = self.encode(sample).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
| 14 |
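# Hedged shape-check sketch for the model above; `VQModel` is also exported by
# `diffusers`, so an equivalent standalone check (random weights, CPU) would be:
# import torch
# from diffusers import VQModel
#
# model = VQModel()                 # defaults mirror the signature above
# x = torch.randn(1, 3, 32, 32)
# out = model(x).sample
# print(out.shape)                  # expected torch.Size([1, 3, 32, 32])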
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Calculate the euclidean distance using numpy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2 ) )
def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Calculate the euclidean distance in pure Python."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1 , vector_2 ) ) ** (1 / 2)
if __name__ == "__main__":
    def benchmark() -> None:
        """Benchmark both implementations on a small example."""
        from timeit import timeit
        print('''Without Numpy''' )
        print(
            timeit(
                '''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=10_000 , globals=globals() , ) )
    print('''With Numpy''' )
    print(
        timeit(
            '''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=10_000 , globals=globals() , ) )
benchmark()
| 14 | 1 |
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch( xlm_checkpoint_path , pytorch_dump_folder_path ) -> Tuple:
    chkpt = torch.load(xlm_checkpoint_path , map_location="""cpu""" )
    state_dict = chkpt["""model"""]
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["""transformer.""" + k] = v
    config = chkpt["""params"""]
    config = {n: v for n, v in config.items() if not isinstance(v , (torch.FloatTensor, numpy.ndarray) )}
    vocab = chkpt["""dico_word2id"""]
    vocab = {s + """</w>""" if s.find("""@@""" ) == -1 and i > 13 else s.replace("""@@""" , """""" ): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + """/""" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""vocab_file"""]
    print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(two_levels_state_dict , pytorch_weights_dump_path )
    print(F"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(pytorch_config_dump_path , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(config , indent=2 ) + """\n""" )
    print(F"""Save vocab file to {pytorch_vocab_dump_path}""" )
    with open(pytorch_vocab_dump_path , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(vocab , indent=2 ) + """\n""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 350 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest( SchedulerCommonTest ):
    '''simple docstring'''
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (('eta', 0.0), ('num_inference_steps', 5_0))
    def get_scheduler_config(self , **kwargs ):
        """simple docstring"""
        config = {
            """num_train_timesteps""": 1000,
            """beta_start""": 0.0_001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """clip_sample""": True,
        }
        config.update(**kwargs )
        return config
    def full_loop(self , **config ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps , eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for t in scheduler.timesteps:
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample , eta ).prev_sample
        return sample
    def test_timesteps(self ):
        """simple docstring"""
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_steps_offset(self ):
        """simple docstring"""
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1 )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(5 )
        assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
    def test_betas(self ):
        """simple docstring"""
        for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules(self ):
        """simple docstring"""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type(self ):
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_clip_sample(self ):
        """simple docstring"""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_timestep_spacing(self ):
        """simple docstring"""
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing )
    def test_rescale_betas_zero_snr(self ):
        """simple docstring"""
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr )
    def test_thresholding(self ):
        """simple docstring"""
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
    def test_time_indices(self ):
        """simple docstring"""
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t )
    def test_inference_steps(self ):
        """simple docstring"""
        for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
            self.check_over_forward(time_step=t , num_inference_steps=num_inference_steps )
    def test_eta(self ):
        """simple docstring"""
        for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
            self.check_over_forward(time_step=t , eta=eta )
    def test_variance(self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14_771 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32_460 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00_979 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1e-5
    def test_batch_step_no_noise(self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps , eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps )
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3] , dim=0 )
        timesteps = torch.arange(num_inference_steps )[0:3, None].repeat(1 , per_sample_batch )
        residual = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , eta )
        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )
        assert abs(result_sum.item() - 1_147.7_904 ) < 1e-2
        assert abs(result_mean.item() - 0.4_982 ) < 1e-3
    def test_full_loop_no_noise(self ):
        """simple docstring"""
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 172.0_067 ) < 1e-2
        assert abs(result_mean.item() - 0.223_967 ) < 1e-3
    def test_full_loop_with_v_prediction(self ):
        """simple docstring"""
        sample = self.full_loop(prediction_type="""v_prediction""" )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 52.5_302 ) < 1e-2
        assert abs(result_mean.item() - 0.0_684 ) < 1e-3
    def test_full_loop_with_set_alpha_to_one(self ):
        """simple docstring"""
        sample = self.full_loop(set_alpha_to_one=True , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 149.8_295 ) < 1e-2
        assert abs(result_mean.item() - 0.1_951 ) < 1e-3
    def test_full_loop_with_no_set_alpha_to_one(self ):
        """simple docstring"""
        sample = self.full_loop(set_alpha_to_one=False , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 149.0_784 ) < 1e-2
        assert abs(result_mean.item() - 0.1_941 ) < 1e-3
| 166 | 0 |
import argparse
import copy
def generate_neighbours( path ):
    '''simple docstring'''
    dict_of_neighbours = {}
    with open(path ) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]] )
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]] )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]] )
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]] )
    return dict_of_neighbours
def generate_first_solution( path , dict_of_neighbours ):
    '''simple docstring'''
    with open(path ) as f:
        start_node = f.read(1 )
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1] ) < int(minim ) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting )
        distance_of_first_solution = distance_of_first_solution + int(minim )
        visiting = best_node
    first_solution.append(end_node )
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1] )
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood( solution , dict_of_neighbours ):
    '''simple docstring'''
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n )
        for kn in solution[1:-1]:
            idx2 = solution.index(kn )
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution )
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k ) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1] )
            _tmp.append(distance )
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp )
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
    return neighborhood_of_solution
def tabu_search( first_solution , distance_of_first_solution , dict_of_neighbours , iters , size ):
    '''simple docstring'''
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution , dict_of_neighbours )
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution ) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution ):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node] )
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list ) >= size:
            tabu_list.pop(0 )
        count = count + 1
    return best_solution_ever, best_cost
def main( args=None ):
    '''simple docstring'''
    dict_of_neighbours = generate_neighbours(args.File )
    first_solution , distance_of_first_solution = generate_first_solution(
        args.File , dict_of_neighbours )
    best_sol , best_cost = tabu_search(
        first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , )
    print(F"""Best solution: {best_sol}, with total distance: {best_cost}.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Tabu Search')
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 107 |
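# Hedged note on the expected input file for the tabu search above: each line lists
# an undirected edge as "node_a node_b distance", and the first character of the
# file is taken as the start node. Hypothetical contents of such a file:
#
#   a b 20
#   a c 18
#   b c 10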
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width( height , width , scale_factor=8 ):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class KandinskyV22ControlnetPipeline( DiffusionPipeline ):
    def __init__( self , unet: UNet2DConditionModel , scheduler: DDPMScheduler , movq: VQModel , ):
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        device = torch.device(f"cuda:{gpu_id}" )
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
        device = torch.device(f"cuda:{gpu_id}" )
        if self.device.type != "cpu":
            self.to("cpu" , silence_dtype_warnings=True )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _ , hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
        if not hasattr(self.unet , "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] , negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] , hint: torch.FloatTensor , height: int = 5_12 , width: int = 5_12 , num_inference_steps: int = 1_00 , guidance_scale: float = 4.0 , num_images_per_prompt: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if isinstance(hint , list ):
            hint = torch.cat(hint , dim=0 )
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            hint = hint.repeat_interleave(num_images_per_prompt , dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
            hint = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=device )
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height , width = downscale_height_and_width(height , width , self.movq_scale_factor )
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            # this variant conditions through added_cond_kwargs rather than text-encoder states
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                noise_pred , variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                _ , variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , "variance_type" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred , _ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 292 | 0 |
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}
def sum_of_digit_factorial(number: int ) -> int:
    """Return the sum of the factorials of the digits of ``number``."""
    return sum(DIGIT_FACTORIAL[d] for d in str(number ) )
def solution() -> int:
    """Sum all numbers that equal the sum of the factorials of their digits."""
    limit = 7 * factorial(9 ) + 1
    return sum(i for i in range(3 , limit ) if sum_of_digit_factorial(i ) == i )
if __name__ == "__main__":
print(F'{solution() = }')
| 359 |
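# Worked example for the solution above (added for clarity): 145 is a curious
# number because 1! + 4! + 5! = 1 + 24 + 120 = 145; the only other such number
# above 2 is 40585, so the solution returns 145 + 40585 = 40730.
# assert sum_of_digit_factorial(145) == 145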
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""",
}
class LxmertConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'lxmert'
    attribute_map = {}
    def __init__(self , vocab_size=3_05_22 , hidden_size=7_68 , num_attention_heads=12 , num_qa_labels=95_00 , num_object_labels=16_00 , num_attr_labels=4_00 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , l_layers=9 , x_layers=5 , r_layers=5 , visual_feat_dim=20_48 , visual_pos_dim=4 , visual_loss_normalizer=6.67 , task_matched=True , task_mask_lm=True , task_obj_predict=True , task_qa=True , visual_obj_loss=True , visual_attr_loss=True , visual_feat_loss=True , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
        super().__init__(**kwargs )
| 24 | 0 |
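# Hedged usage sketch for the configuration class above; assumes `transformers`
# is installed and uses only randomly initialised weights:
# from transformers import LxmertConfig, LxmertModel
#
# config = LxmertConfig()          # default base-uncased hyperparameters
# model = LxmertModel(config)
# print(config.num_hidden_layers)  # {'vision': 5, 'cross_encoder': 5, 'language': 9}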
"""simple docstring"""
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""artists_file""": """artists.json""",
"""lyrics_file""": """lyrics.json""",
"""genres_file""": """genres.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""artists_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
},
"""genres_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
},
"""lyrics_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
},
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
"""jukebox""": 512,
}
class JukeboxTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , artists_file , genres_file , lyrics_file , version=["v3", "v2", "v2"] , max_n_lyric_tokens=512 , n_genres=5 , unk_token="<|endoftext|>" , **kwargs , ):
        """simple docstring"""
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        super().__init__(
            unk_token=unk_token , n_genres=n_genres , version=version , max_n_lyric_tokens=max_n_lyric_tokens , **kwargs , )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres
        with open(artists_file , encoding="""utf-8""" ) as vocab_handle:
            self.artists_encoder = json.load(vocab_handle )
        with open(genres_file , encoding="""utf-8""" ) as vocab_handle:
            self.genres_encoder = json.load(vocab_handle )
        with open(lyrics_file , encoding="""utf-8""" ) as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle )
        oov = R"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder ) == 79:
            oov = oov.replace(r"""\-'""" , r"""\-+'""" )
        self.out_of_vocab = regex.compile(oov )
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
    @property
    def vocab_size( self ):
        """simple docstring"""
        return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
    def get_vocab( self ):
        """simple docstring"""
        # merge the three sub-vocabularies into one mapping
        return {**self.artists_encoder , **self.genres_encoder , **self.lyrics_encoder}
    def _convert_token_to_id( self , list_artists , list_genres , list_lyrics ):
        """simple docstring"""
        artists_id = [self.artists_encoder.get(artist , 0 ) for artist in list_artists]
        for genres in range(len(list_genres ) ):
            list_genres[genres] = [self.genres_encoder.get(genre , 0 ) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
        lyric_ids = [[self.lyrics_encoder.get(character , 0 ) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids
    def _tokenize( self , lyrics ):
        """simple docstring"""
        return list(lyrics )
    def tokenize( self , artist , genre , lyrics , **kwargs ):
        """simple docstring"""
        artist , genre , lyrics = self.prepare_for_tokenization(artist , genre , lyrics )
        lyrics = self._tokenize(lyrics )
        return artist, genre, lyrics
    def prepare_for_tokenization( self , artists , genres , lyrics , is_split_into_words = False ):
        """simple docstring"""
        for idx in range(len(self.version ) ):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx] ) + ".v2"
                genres[idx] = [
                    self._normalize(genre ) + ".v2" for genre in genres[idx].split("""_""" )
                ]  # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" )
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab ) )}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab ) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(r"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""" )
        lyrics = self._run_strip_accents(lyrics )
        lyrics = lyrics.replace("""\\""" , """\n""" )
        lyrics = self.out_of_vocab.sub("""""" , lyrics ), [], []
        return artists, genres, lyrics
    def _run_strip_accents( self , text ):
        """simple docstring"""
        text = unicodedata.normalize("""NFD""" , text )
        output = []
        for char in text:
            cat = unicodedata.category(char )
            if cat == "Mn":
                continue
            output.append(char )
        return "".join(output )
    def _normalize( self , text ):
        """simple docstring"""
        accepted = (
            [chr(i ) for i in range(ord("""a""" ) , ord("""z""" ) + 1 )]
            + [chr(i ) for i in range(ord("""A""" ) , ord("""Z""" ) + 1 )]
            + [chr(i ) for i in range(ord("""0""" ) , ord("""9""" ) + 1 )]
            + ["."]
        )
        accepted = frozenset(accepted )
        pattern = re.compile(r"""_+""" )
        text = "".join([c if c in accepted else """_""" for c in text.lower()] )
        text = pattern.sub("""_""" , text ).strip("""_""" )
        return text
    def convert_lyric_tokens_to_string( self , lyrics ):
        """simple docstring"""
        return " ".join(lyrics )
    def convert_to_tensors( self , inputs , tensor_type = None , prepend_batch_axis = False ):
        """simple docstring"""
        # Convert to TensorType
        if not isinstance(tensor_type , TensorType ):
            tensor_type = TensorType(tensor_type )
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    """Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" )
            import tensorflow as tf
            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" )
            import torch
            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" )
            import jax.numpy as jnp  # noqa: F811
            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs ):
                inputs = as_tensor(inputs )
        except:  # noqa E722
            raise ValueError(
                """Unable to create tensor, you should probably activate truncation and/or padding """
                """with 'padding=True' 'truncation=True' to have batched tensors with the same length.""" )
        return inputs
    def __call__( self , artist , genres , lyrics="" , return_tensors="pt" ):
        """simple docstring"""
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version )
        genres = [genres] * len(self.version )
        artists_tokens , genres_tokens , lyrics_tokens = self.tokenize(artist , genres , lyrics )
        artists_id , genres_ids , full_tokens = self._convert_token_to_id(artists_tokens , genres_tokens , lyrics_tokens )
        attention_masks = [-INFINITY] * len(full_tokens[-1] )
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=return_tensors )
            for i in range(len(self.version ) )
        ]
        return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        artists_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""] )
        with open(artists_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.artists_encoder , ensure_ascii=False ) )
        genres_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""] )
        with open(genres_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.genres_encoder , ensure_ascii=False ) )
        lyrics_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""] )
        with open(lyrics_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.lyrics_encoder , ensure_ascii=False ) )
        return (artists_file, genres_file, lyrics_file)
    def _convert_id_to_token( self , artists_index , genres_index , lyric_index ):
        """simple docstring"""
        artist = self.artists_decoder.get(artists_index )
        genres = [self.genres_decoder.get(genre ) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character ) for character in lyric_index]
        return artist, genres, lyrics
| 291 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class _snake_case ( _lowercase ):
lowerCamelCase__: Tuple = ["input_features"]
def __init__( self: Tuple , __lowerCamelCase: Union[str, Any]=80 , __lowerCamelCase: Optional[Any]=1_60_00 , __lowerCamelCase: Any=1_60 , __lowerCamelCase: Optional[int]=30 , __lowerCamelCase: List[str]=4_00 , __lowerCamelCase: Tuple=0.0 , __lowerCamelCase: Union[str, Any]=False , **__lowerCamelCase: Dict , ) -> Any:
super().__init__(
feature_size=__lowerCamelCase , sampling_rate=__lowerCamelCase , padding_value=__lowerCamelCase , return_attention_mask=__lowerCamelCase , **__lowerCamelCase , )
__UpperCAmelCase : int = n_fft
__UpperCAmelCase : List[str] = hop_length
__UpperCAmelCase : Optional[Any] = chunk_length
__UpperCAmelCase : Union[str, Any] = chunk_length * sampling_rate
__UpperCAmelCase : Any = self.n_samples // hop_length
__UpperCAmelCase : Tuple = sampling_rate
__UpperCAmelCase : List[Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCamelCase , min_frequency=0.0 , max_frequency=80_00.0 , sampling_rate=__lowerCamelCase , norm="slaney" , mel_scale="slaney" , )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: np.array ) -> np.ndarray:
__UpperCAmelCase : List[Any] = spectrogram(
__lowerCamelCase , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
__UpperCAmelCase : Union[str, Any] = log_spec[:, :-1]
__UpperCAmelCase : List[Any] = np.maximum(__lowerCamelCase , log_spec.max() - 8.0 )
__UpperCAmelCase : str = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _lowerCamelCase ( __lowerCamelCase: List[np.ndarray] , __lowerCamelCase: List[np.ndarray] , __lowerCamelCase: float = 0.0 ) -> List[np.ndarray]:
if attention_mask is not None:
__UpperCAmelCase : Tuple = np.array(__lowerCamelCase , np.intaa )
__UpperCAmelCase : Dict = []
for vector, length in zip(__lowerCamelCase , attention_mask.sum(-1 ) ):
__UpperCAmelCase : Union[str, Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
__UpperCAmelCase : Dict = padding_value
normed_input_values.append(__lowerCamelCase )
else:
__UpperCAmelCase : Optional[int] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], truncation: bool = True, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = None, padding: Optional[str] = "max_length", max_length: Optional[int] = None, sampling_rate: Optional[int] = None, do_normalize: Optional[bool] = None, **kwargs) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
                    f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
                    f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        batched_speech = BatchFeature({"input_features": raw_speech})
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech, padding=padding, max_length=max_length if max_length else self.n_samples, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask or do_normalize)
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"], attention_mask=padded_inputs["attention_mask"], padding_value=self.padding_value)
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)
        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)
        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]
        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
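
# Hedged usage sketch (not part of the original file): the class name and its
# defaults (80 mel bins, 16 kHz, hop 160, 30 s chunks) come from the code above;
# the 1-second random waveform is an assumption for illustration only.
if __name__ == "__main__":
    extractor = WhisperFeatureExtractor()
    waveform = np.random.randn(16_000).astype(np.float32)  # 1 s of mono 16 kHz audio
    features = extractor(waveform, sampling_rate=16_000, return_tensors="np")
    # a 30 s chunk at hop length 160 gives 3000 frames of 80 log-mel bins
    print(features["input_features"].shape)  # (1, 80, 3000)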
| 157 | 0 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
UpperCAmelCase ="Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("""tpu-config""" , description=_description )
    else:
        parser = argparse.ArgumentParser("""Accelerate tpu-config command""" , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        """Config Arguments""" , """Arguments that can be configured through `accelerate config`.""" )
    config_args.add_argument(
        """--config_file""" , type=str , default=None , help="""Path to the config file to use for accelerate.""" , )
    config_args.add_argument(
        """--tpu_name""" , default=None , help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""" , )
    config_args.add_argument(
        """--tpu_zone""" , default=None , help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""" , )
    pod_args = parser.add_argument_group("""TPU Arguments""" , """Arguments for options ran inside the TPU.""" )
pod_args.add_argument(
"""--use_alpha""" , action="""store_true""" , help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" , )
pod_args.add_argument(
"""--command_file""" , default=_a , help="""The path to the file containing the commands to run on the pod on startup.""" , )
pod_args.add_argument(
"""--command""" , action="""append""" , nargs="""+""" , help="""A command to run on the pod. Can be passed multiple times.""" , )
pod_args.add_argument(
"""--install_accelerate""" , action="""store_true""" , help="""Whether to install accelerate on the pod. Defaults to False.""" , )
pod_args.add_argument(
"""--accelerate_version""" , default="""latest""" , help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.""" , )
pod_args.add_argument(
"""--debug""" , action="""store_true""" , help="""If set, will print the command that would be run instead of running it.""" )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher )
    return parser
def tpu_command_launcher(args):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = '''git+https://github.com/huggingface/accelerate.git'''
    elif args.accelerate_version == "latest":
        args.accelerate_version = '''accelerate -U'''
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = f'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError("""You must specify either a command file or a command to run on the pod.""" )
    if args.command_file:
        with open(args.command_file , """r""" ) as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ['''cd /usr/share''']
    if args.install_accelerate:
        new_cmd += [f'pip install {args.accelerate_version}']
    new_cmd += args.command
    args.command = '''; '''.join(new_cmd )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['''gcloud''']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
        print(f'Running {" ".join(cmd )}' )
        return
    subprocess.run(cmd )
print("""Successfully setup pod.""" )
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args )
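
# Hedged usage sketch (illustrative): the TPU name/zone below are made-up values
# and an existing `accelerate config` file is assumed; with `--debug`, the launcher
# only prints the gcloud command it would run instead of executing it.
if __name__ == "__main__":
    example_args = tpu_command_parser().parse_args(
        ["--tpu_name", "my-tpu", "--tpu_zone", "us-central1-a", "--command", "echo hello", "--debug"]
    )
    tpu_command_launcher(example_args)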
| 366 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = '''data2vec-vision'''

    def __init__(self, hidden_size=7_6_8, num_hidden_layers=1_2, num_attention_heads=1_2, intermediate_size=3_0_7_2, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-12, image_size=2_2_4, patch_size=1_6, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 1_1], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=2_5_6, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=2_5_5, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('''1.11''' )

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1E-4
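
# Hedged usage sketch (illustrative): build the config with its defaults and inspect
# the ONNX input axes; the printed values simply mirror the definitions above.
if __name__ == "__main__":
    config = Data2VecVisionConfig()
    onnx_config = Data2VecVisionOnnxConfig(config)
    print(config.hidden_size, config.num_hidden_layers)  # 768 12
    print(dict(onnx_config.inputs))  # {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}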
| 77 | 0 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-1_0_0, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(2_0, dtype=torch.float32)).item(), 2_0)

    def test_silu(self):
        act = get_activation("silu")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-1_0_0, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(2_0, dtype=torch.float32)).item(), 2_0)

    def test_mish(self):
        act = get_activation("mish")
        self.assertIsInstance(act, nn.Mish)
        self.assertEqual(act(torch.tensor(-2_0_0, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(2_0, dtype=torch.float32)).item(), 2_0)

    def test_gelu(self):
        act = get_activation("gelu")
        self.assertIsInstance(act, nn.GELU)
        self.assertEqual(act(torch.tensor(-1_0_0, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(2_0, dtype=torch.float32)).item(), 2_0)
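
# Hedged usage sketch (illustrative): `get_activation` maps a string name to an
# `nn.Module` instance; only the names exercised in the tests above are guaranteed
# by this file.
if __name__ == "__main__":
    act = get_activation("gelu")
    print(act(torch.tensor([-1.0, 0.0, 1.0])))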
| 311 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
args = parser.parse_args()

device = '''cpu'''
prompt = '''a lovely <dicoo> in red dress and hat, in the snowy and brightly night, with many brightly buildings'''

model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 6_4, 6_4)
timestep = torch.rand(1) * 9_9_9
encoder_hidden_status = torch.randn(2, 7_7, 7_6_8)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 6_6_6
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'''generator''': generator}
if args.steps is not None:
    generate_kwargs['''num_inference_steps'''] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save('''generated.png''')
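
# Note: channels_last memory format plus `ipex.optimize(..., dtype=torch.bfloat16)`
# is what enables the CPU BF16 fast path used in the autocast block above; the
# try/except falls back to shape-agnostic optimization when the `sample_input`
# path raises.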
| 130 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
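
# Hedged illustration (made-up values): with pad_token_id=1 and input_ids
# [[5, 7, 2], [6, 2, 1]], the helper above produces attention_mask
# [[1, 1, 1], [1, 1, 0]] -- padding positions are masked out.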
class FlaxBlenderbotModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.0_2):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False)
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["""input_ids"""])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="""i4""")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1))
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids)
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="""i4""")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids)
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3, msg=f"""Max diff is {diff}""")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["""input_ids"""])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1)
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1))
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids)
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="""i4""")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids)
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3, msg=f"""Max diff is {diff}""")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ], dtype=np.int64)
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0)
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["""logits"""].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48)
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["""logits"""].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("""JIT Enabled"""):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("""JIT Disabled"""):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["""input_ids"""], inputs_dict["""attention_mask"""])
                prepared_inputs_dict = {
                    """decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
                    """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
                    """encoder_outputs""": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs)

                with self.subTest("""JIT Enabled"""):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("""JIT Disabled"""):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""facebook/blenderbot-400M-distill""")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != """cpu""", """3B test too slow on CPU.""")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"""num_beams""": 1, """early_stopping""": True, """min_length""": 15, """max_length""": 25}
        TOK_DECODE_KW = {"""skip_special_tokens""": True, """clean_up_tokenization_spaces""": True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("""facebook/blenderbot-3B""", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("""facebook/blenderbot-3B""")
        src_text = ["""Sam"""]
        model_inputs = tokenizer(src_text, return_tensors="""jax""")
        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = """Sam is a great name. It means \"sun\" in Gaelic."""
        generated_txt = tokenizer.batch_decode(generated_ids, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 369 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"""latents"""}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("""mps"""):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """original_image""": original_image,
            """mask_image""": mask_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available(), reason="""XFormers attention is only available with CUDA and `xformers` installed""")
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != """cuda""", reason="""float16 requires CUDA""")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1E-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1E-2)
| 262 | 0 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
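
# `list_field` exists because a dataclass field cannot take a mutable list as a plain
# default value; routing it through `default_factory` gives every instance its own copy.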
@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
| 244 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 145 | 0 |
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output['''input_ids'''] = tokenizer(example['''content'''], truncation=False)['''input_ids''']
    # character-to-token ratio of the source file
    output['''ratio_char_token'''] = len(example['''content''']) / len(output['''input_ids'''])
    return output
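
# Hedged illustration (made-up input): for example = {"content": "def f():\n    return 1\n"},
# the ratio field is len(characters) / len(tokens); larger values mean the tokenizer
# compresses the source more aggressively.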
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(F"""Dataset loaded in {time.time()-t_start:.2f}s""")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 219 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
    commands_parser = parser.add_subparsers(help='''diffusers-cli command helpers''' )
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()


if __name__ == "__main__":
    main()
| 219 | 1 |
import unittest
import numpy as np
def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None):
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)
    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)
    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement.")
    return mat_c - mat_b.T @ a_inv @ mat_b
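
# The tests below rely on the determinant identity for a symmetric block matrix
# X = [[A, B], [B^T, C]]: det(X) = det(A) * det(S), where S = C - B^T A^{-1} B is
# the Schur complement computed above (valid whenever A is invertible).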
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        # swapped argument order makes the column check fail
        with self.assertRaises(ValueError):
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
| 210 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__a : Tuple = """pt"""
elif is_tf_available():
__a : int = """tf"""
else:
__a : Tuple = """jax"""
class _UpperCamelCase ( _UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
__a : List[Any] = ByTaTokenizer
__a : str = False
    def setUp(self):
        super().setUp()
        tokenizer = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByTaTokenizer.from_pretrained('''google/byt5-small''')

    def get_tokenizer(self, **kwargs) -> ByTaTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(R'''^[ a-zA-Z]+$''', t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + ''' '''
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = ''' ''' + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''])
        batch_without_eos_added = tokenizer(['''hi''', '''I went to the gym''', ''''''])
        self.assertListEqual(batch_with_eos_added['''input_ids'''], batch_without_eos_added['''input_ids'''])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = '''Unicode €.'''
        encoded = tokenizer(src_text)
        encoded_ids = [88, 1_13, 1_08, 1_02, 1_14, 1_03, 1_04, 35, 2_29, 1_33, 1_75, 49, 1]
        self.assertEqual(encoded['''input_ids'''], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, '''Unicode €.</s>''')
        encoded = tokenizer('''e è é ê ë''')
        encoded_ids = [1_04, 35, 1_98, 1_71, 35, 1_98, 1_72, 35, 1_98, 1_73, 35, 1_98, 1_74, 1]
        self.assertEqual(encoded['''input_ids'''], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, '''e è é ê ë</s>''')
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''')), '''e è é ê ë</s>''')
    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        # fmt: off
        expected_src_tokens = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('''input_ids''', batch)
        self.assertIn('''attention_mask''', batch)
        self.assertNotIn('''decoder_input_ids''', batch)
        self.assertNotIn('''decoder_attention_mask''', batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding='''max_length''', truncation=True, return_tensors=FRAMEWORK)
        self.assertEqual(32, targets['''input_ids'''].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ['''A long paragraph for summarization. </s>''']
        tgt_text = ['''Summary of the text. </s>''']
        # fmt: off
        expected_src_tokens = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 35, 1]
        expected_tgt_tokens = [86, 1_20, 1_12, 1_12, 1_00, 1_17, 1_24, 35, 1_14, 1_05, 35, 1_19, 1_07, 1_04, 35, 1_19, 1_04, 1_23, 1_19, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch['''input_ids'''][0])
        self.assertEqual(expected_tgt_tokens, batch['''labels'''][0])
    def test_save_and_load_tokenizer(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ''' He is very happy, UNwant\u00E9d,running'''
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)
        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ''' He is very happy, UNwant\u00E9d,running'''
                tokenizer.add_tokens(['''bim''', '''bambam'''])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append('''new_additional_special_token''')
                tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn('''new_additional_special_token''', after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, '''special_tokens_map.json'''), encoding='''utf-8''') as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, '''tokenizer_config.json'''), encoding='''utf-8''') as json_file:
                    tokenizer_config = json.load(json_file)
                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(1_25)]
                special_tokens_map['''additional_special_tokens'''] = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                tokenizer_config['''additional_special_tokens'''] = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                with open(os.path.join(tmp_dir, '''special_tokens_map.json'''), '''w''', encoding='''utf-8''') as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, '''tokenizer_config.json'''), '''w''', encoding='''utf-8''') as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir, )
                self.assertIn(
                    '''an_additional_special_token''', tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ['''an_additional_special_token'''], tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''])), )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''', lstrip=False)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, )
                self.assertIn('''a_new_additional_special_token''', tokenizer.additional_special_tokens)
                self.assertEqual(
                    ['''a_new_additional_special_token'''], tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''])), )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
                self.assertTrue(tokenizer.decode([2_55]) == '''''')

    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    # this tokenizer has no vocab, so the common ids-setter test needs its own implementation
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    '''bos_token''',
                    '''eos_token''',
                    '''unk_token''',
                    '''sep_token''',
                    '''pad_token''',
                    '''cls_token''',
                    '''mask_token''',
                ]
                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False)
                for attr in attributes_list:
                    setattr(tokenizer, attr + '''_id''', None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + '''_id'''), None)
                    setattr(tokenizer, attr + '''_id''', token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + '''_id'''), token_id_to_test_setters)
                setattr(tokenizer, '''additional_special_tokens_ids''', [])
                self.assertListEqual(getattr(tokenizer, '''additional_special_tokens'''), [])
                self.assertListEqual(getattr(tokenizer, '''additional_special_tokens_ids'''), [])
                setattr(tokenizer, '''additional_special_tokens_ids''', [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, '''additional_special_tokens'''), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, '''additional_special_tokens_ids'''), [token_id_to_test_setters])
| 210 | 1 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        '''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
        '''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
        '''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
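
# Hedged note: `make_duplicate_clusters(dataset, 0.85)` (used below) groups files whose
# estimated MinHash Jaccard similarity is at least 0.85; the two "a "-repeat files above
# are near-duplicates, while the "b " file is not.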
class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]['''copies'''], 2)
        self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''], True)
| 301 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
| 301 | 1 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 1_0000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__( self ) -> None:
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs( self ) -> dict:
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder ):
    """CSV dataset builder."""
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info( self ) -> datasets.DatasetInfo:
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        if not self.config.data_files:
            raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files} ) )
        return splits
    def _cast_table( self , pa_table: pa.Table ) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature ) for feature in self.config.features.values() ):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=schema )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table , schema )
        return pa_table
    def _generate_tables( self , files ):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            csv_file_reader = pd.read_csv(file , iterator=True , dtype=dtype , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(csv_file_reader ):
                    pa_table = pa.Table.from_pandas(df )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table )
            except ValueError as e:
                logger.error(F'Failed to read file \'{file}\' with error {type(e )}: {e}' )
                raise
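    # A minimal usage sketch of this packaged builder: in practice it is
    # reached through load_dataset("csv", ...) and every CsvConfig field above
    # can be passed as a keyword. "train.csv" is a placeholder path.
    #
    #     from datasets import load_dataset
    #     ds = load_dataset("csv", data_files={"train": "train.csv"}, sep=";", skiprows=1)
    #     print(ds["train"].features)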
| 123 |
from __future__ import annotations
_snake_case : Any = "Muhammad Umer Farooq"
_snake_case : Optional[int] = "MIT"
_snake_case : Union[str, Any] = "1.0.0"
_snake_case : Optional[Any] = "Muhammad Umer Farooq"
_snake_case : List[Any] = "[email protected]"
_snake_case : Dict = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser ):
    """simple docstring"""
    def __init__( self , domain : str ) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag( self , tag : str , attrs : list[tuple[str, str | None]] ) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain , value )
                        self.urls.append(url )
def get_domain_name ( url ):
    return ".".join(get_sub_domain_name(url ).split("." )[-2:] )


def get_sub_domain_name ( url ):
    return parse.urlparse(url ).netloc
def emails_from_url ( url = "https://github.com" ):
    domain = get_domain_name(url )

    # Initialize the parser
    parser = Parser(domain )

    try:
        # Open URL
        r = requests.get(url )

        # pass the raw HTML to the parser to get links
        parser.feed(r.text )

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link )
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails )
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(f'''{len(emails)} emails found:''')
print("\n".join(sorted(emails)))
| 123 | 1 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config( checkpoint_url: str ) -> Swin2SRConfig:
    '''simple docstring'''
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 4_8
        config.upsampler = """pixelshuffle_aux"""
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 6_0
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = """pixelshuffledirect"""
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = """nearest+conv"""
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 1_2_6
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = """"""

    return config
def rename_key( name: str , config: Swin2SRConfig ) -> str:
    '''simple docstring'''
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
    if "patch_embed.norm" in name:
        name = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" )
    if "layers" in name:
        name = name.replace("""layers""" , """encoder.stages""" )
    if "residual_group.blocks" in name:
        name = name.replace("""residual_group.blocks""" , """layers""" )
    if "attn.proj" in name:
        name = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "attn" in name:
        name = name.replace("""attn""" , """attention.self""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name:
        name = name.replace("""norm2""" , """layernorm_after""" )
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""" , """output.dense""" )
    if "q_bias" in name:
        name = name.replace("""q_bias""" , """query.bias""" )
    if "k_bias" in name:
        name = name.replace("""k_bias""" , """key.bias""" )
    if "v_bias" in name:
        name = name.replace("""v_bias""" , """value.bias""" )
    if "cpb_mlp" in name:
        name = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
    if "patch_embed.proj" in name:
        name = name.replace("""patch_embed.proj""" , """patch_embed.projection""" )

    if name == "norm.weight":
        name = """layernorm.weight"""
    if name == "norm.bias":
        name = """layernorm.bias"""

    if "conv_first" in name:
        name = name.replace("""conv_first""" , """first_convolution""" )

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("""conv_last""" , """final_convolution""" )
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" )
            if "upsample.0" in name:
                name = name.replace("""upsample.0""" , """upsample.convolution_0""" )
            if "upsample.2" in name:
                name = name.replace("""upsample.2""" , """upsample.convolution_1""" )
            name = """upsample.""" + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" )
            name = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" )
        else:
            pass
    else:
        name = """swin2sr.""" + name

    return name
def convert_state_dict( orig_state_dict: dict , config: Swin2SRConfig ) -> dict:
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )

        if "qkv" in key:
            key_split = key.split(""".""" )
            stage_num = int(key_split[1] )
            block_num = int(key_split[4] )
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[F'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight'] = val[:dim, :]
                orig_state_dict[F'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight'] = val[dim : dim * 2, :]
                orig_state_dict[F'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[F'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias'] = val[:dim]
                orig_state_dict[F'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias'] = val[dim : dim * 2]
                orig_state_dict[F'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , config )] = val

    return orig_state_dict
def convert_swin2sr_checkpoint( checkpoint_url: str , pytorch_dump_folder_path: str , push_to_hub: bool ) -> None:
    '''simple docstring'''
    config = get_config(checkpoint_url )
    model = Swin2SRForImageSuperResolution(config )
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys , unexpected_keys = model.load_state_dict(new_state_dict , strict=False )

    if len(missing_keys ) > 0:
        raise ValueError("""Missing keys when converting: {}""".format(missing_keys ) )
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f'Unexpected key {key} in state_dict' )

    # verify values
    url = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"""
    image = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 1_2_6 if """Jpeg""" in checkpoint_url else 2_5_6
    transforms = Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    pixel_values = transforms(image ).unsqueeze(0 )

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1 )

    outputs = model(pixel_values )

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 5_1_2, 5_1_2] )
        expected_slice = torch.tensor(
            [[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
        expected_slice = torch.tensor(
            [[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
        expected_slice = torch.tensor(
            [[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 5_1_2, 5_1_2] )
        expected_slice = torch.tensor(
            [[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
        expected_slice = torch.tensor(
            [[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , expected_slice , atol=1e-3 )
    print("""Looks ok!""" )

    url_to_name = {
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
            """swin2SR-classical-sr-x2-64"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
            """swin2SR-classical-sr-x4-64"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
            """swin2SR-compressed-sr-x4-48"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
            """swin2SR-lightweight-x2-64"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
            """swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'Saving image processor to {pytorch_dump_folder_path}' )
        processor.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        model.push_to_hub(f'caidas/{model_name}' )
        processor.push_to_hub(f'caidas/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
type=str,
help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")
    args = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
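    # After conversion, the dump folder is a regular transformers checkpoint.
    # A hypothetical follow-up (the folder path is a placeholder):
    #
    #     model = Swin2SRForImageSuperResolution.from_pretrained("./swin2sr-converted")
    #     processor = Swin2SRImageProcessor()
    #     image = Image.open("input.png").convert("RGB")
    #     pixel_values = processor(image, return_tensors="pt").pixel_values
    #     with torch.no_grad():
    #         outputs = model(pixel_values)
    #     print(outputs.reconstruction.shape)  # (1, 3, H * upscale, W * upscale)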
| 32 | """simple docstring"""
class FlowNetwork :
    def __init__( self , graph , sources , sinks ):
        """simple docstring"""
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources , sinks )
        self.vertices_count = len(graph )
        self.maximum_flow_algorithm = None
    # make only one source and one sink
    def _normalize_graph( self , sources , sinks ):
        """simple docstring"""
        if sources is int:
            sources = [sources]
        if sinks is int:
            sinks = [sinks]

        if len(sources ) == 0 or len(sinks ) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources ) > 1 or len(sinks ) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i] )

            size = len(self.graph ) + 1
            for room in self.graph:
                room.insert(0 , 0 )
            self.graph.insert(0 , [0] * size )
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph ) + 1
            for room in self.graph:
                room.append(0 )
            self.graph.append([0] * size )
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1
    def find_maximum_flow( self ):
        """simple docstring"""
        if self.maximum_flow_algorithm is None:
            raise Exception("""You need to set maximum flow algorithm before.""" )
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()
    def set_maximum_flow_algorithm( self , algorithm ):
        """simple docstring"""
        self.maximum_flow_algorithm = algorithm(self )
class FlowNetworkAlgorithmExecutor :
    def __init__( self , flow_network ):
        """simple docstring"""
        self.flow_network = flow_network
        self.vertices_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute( self ):
        """simple docstring"""
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override it in a subclass
    def _algorithm( self ):
        """simple docstring"""
        pass
class MaximumFlowAlgorithmExecutor ( FlowNetworkAlgorithmExecutor ):
    def __init__( self , flow_network ):
        """simple docstring"""
        super().__init__(flow_network )
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow( self ):
        """simple docstring"""
        if not self.executed:
            raise Exception("""You should execute algorithm before using its result!""" )
        return self.maximum_flow
class PushRelabelExecutor ( MaximumFlowAlgorithmExecutor ):
    def __init__( self , flow_network ):
        """simple docstring"""
        super().__init__(flow_network )

        self.preflow = [[0] * self.vertices_count for i in range(self.vertices_count )]
        self.heights = [0] * self.vertices_count
        self.excesses = [0] * self.vertices_count
    def _algorithm( self ):
        """simple docstring"""
        self.heights[self.source_index] = self.vertices_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.vertices_count )
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list ):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index )
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0 , vertices_list.pop(i ) )
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index] )
    def process_vertex( self , vertex_index ):
        """simple docstring"""
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.vertices_count ):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index , neighbour_index )

            self.relabel(vertex_index )
    def push( self , from_index , to_index ):
        """simple docstring"""
        preflow_delta = min(
            self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta
    def relabel( self , vertex_index ):
        """simple docstring"""
        min_height = None
        for to_index in range(self.vertices_count ):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
    maximum_flow = flow_network.find_maximum_flow()
print(F"""maximum flow is {maximum_flow}""")
| 32 | 1 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["""memory_attention""", """encoder_attn"""],
["""attention""", """attn"""],
["""/""", """."""],
[""".LayerNorm.gamma""", """_layer_norm.weight"""],
[""".LayerNorm.beta""", """_layer_norm.bias"""],
["""r.layer_""", """r.layers."""],
["""output_proj""", """out_proj"""],
["""ffn.dense_1.""", """fc2."""],
["""ffn.dense.""", """fc1."""],
["""ffn_layer_norm""", """final_layer_norm"""],
["""kernel""", """weight"""],
["""encoder_layer_norm.""", """encoder.layer_norm."""],
["""decoder_layer_norm.""", """decoder.layer_norm."""],
["""embeddings.weights""", """shared.weight"""],
]
def rename_state_dict_key( k: str ):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name ,hf_name )
    return k
def convert_pegasus( tf_weights: dict ,cfg_updates: dict ) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates )
    cfg = PegasusConfig(**cfg_kwargs )
    torch_model = PegasusForConditionalGeneration(cfg )
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k )
        if new_k not in sd:
            raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v ,dtype=sd[new_k].dtype )
        assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
    # make sure embedding.padding_idx is respected
    mapping['''shared.weight'''][cfg.pad_token_id] = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] )
    mapping['''encoder.embed_tokens.weight'''] = mapping['''shared.weight''']
    mapping['''decoder.embed_tokens.weight'''] = mapping['''shared.weight''']
    empty_biases = {k: torch.zeros_like(v ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping}
    mapping.update(**empty_biases )
    missing ,extra = torch_model.model.load_state_dict(mapping ,strict=False )
    unexpected_missing = [
        k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
    ]
    assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
    assert extra == [], F"""no matches found for the following tf keys {extra}"""
    return torch_model
def get_tf_weights_as_numpy( path: str="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ['''Adafactor''', '''global_step''']
    for name, shape in tqdm(init_vars ,desc='''converting tf checkpoint to dict''' ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path ,name )
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch( ckpt_path: str ,save_dir: str ):
    # save tokenizer first
    dataset = Path(ckpt_path ).parent.name
    desired_max_model_length = task_specific_params[F"""summarization_{dataset}"""]['''max_position_embeddings''']
    tok = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' ,model_max_length=desired_max_model_length )
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir )

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    cfg_updates = task_specific_params[F"""summarization_{dataset}"""]
    if dataset == "large":
        cfg_updates['''task_specific_params'''] = task_specific_params
    torch_model = convert_pegasus(tf_weights ,cfg_updates )
    torch_model.save_pretrained(save_dir )
    sd = torch_model.state_dict()
    sd.pop('''model.decoder.embed_positions.weight''' )
    sd.pop('''model.encoder.embed_positions.weight''' )
    torch.save(sd ,Path(save_dir ) / '''pytorch_model.bin''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
    parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("""pegasus""", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
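    # The save_dir is now a regular transformers checkpoint; a hypothetical
    # follow-up for summarization (the directory name mirrors the default
    # layout built above):
    #
    #     model = PegasusForConditionalGeneration.from_pretrained("pegasus/aeslc")
    #     tok = PegasusTokenizer.from_pretrained("pegasus/aeslc")
    #     batch = tok(["An example document to summarize."], return_tensors="pt")
    #     print(tok.batch_decode(model.generate(**batch), skip_special_tokens=True))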
| 330 |
def is_automorphic_number( number : int ) -> bool:
    """
    Check whether ``number`` is automorphic, i.e. whether its square ends in
    the number itself (5 -> 25, 76 -> 5776).

    >>> is_automorphic_number(5)
    True
    >>> is_automorphic_number(76)
    True
    >>> is_automorphic_number(7)
    False
    """
    if not isinstance(number ,int ):
        msg = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
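    # The automorphic numbers below 1000, as a quick sanity check of the
    # predicate defined above:
    print([n for n in range(1, 1000) if is_automorphic_number(n)])  # [1, 5, 6, 25, 76, 376, 625]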
| 330 | 1 |
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def onnx_compliancy( saved_model_path , strict , opset ) -> None:
    """simple docstring"""
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH , 'utils' , 'tf_ops' , 'onnx.json' ) ) as f:
        onnx_opsets = json.load(f )['opsets']

    for i in range(1 , opset + 1 ):
        onnx_ops.extend(onnx_opsets[str(i )] )

    with open(saved_model_path , 'rb' ) as f:
        saved_model.ParseFromString(f.read() )

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node )

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def )

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names )
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op )

    if strict and len(incompatible_ops ) > 0:
        raise Exception(F"""Found the following incompatible ops for the opset {opset}:\n""" + "\n".join(incompatible_ops ) )
    elif len(incompatible_ops ) > 0:
        print(F"""Found the following incompatible ops for the opset {opset}:""" )
        print(*incompatible_ops , sep='\n' )
    else:
        print(F"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
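    # The same check can also be driven programmatically; a hypothetical
    # sketch with a placeholder SavedModel path:
    #
    #     onnx_compliancy("model/saved_model.pb", strict=False, opset=12)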
| 249 |
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester( unittest.TestCase , ToolTesterMixin ):
"""simple docstring"""
    def setUp( self : Optional[int] ):
        """simple docstring"""
        self.tool = load_tool('text-to-speech' )
        self.tool.setup()
    def test_exact_match_arg( self : Dict ):
        """simple docstring"""
        torch.manual_seed(0 )
        result = self.tool('hey' )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )

    def test_exact_match_kwarg( self : Dict ):
        """simple docstring"""
        torch.manual_seed(0 )
        result = self.tool(text='hey' )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
| 249 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor( ProcessorMixin ):
    '''simple docstring'''
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv2ImageProcessor'
    tokenizer_class = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs) -> None:
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor , tokenizer)
    def __call__( self , images , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes: Union[List[List[int]], List[List[List[int]]]] = None , word_labels: Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_token_type_ids: Optional[bool] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs ) -> BatchEncoding:
        """simple docstring"""
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True.")

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"] , text_pair=text_pair , boxes=boxes if boxes is not None else features["boxes"] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs
    def get_overflowing_images( self , images , overflow_to_sample_mapping) -> List[Any]:
        """simple docstring"""
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                F""" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}""")

        return images_with_overflow
    def batch_decode( self , *args , **kwargs) -> Tuple:
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs)

    def decode( self , *args , **kwargs) -> Optional[Any]:
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs)
    @property
    def model_input_names( self) -> List[str]:
        """simple docstring"""
        return ["input_ids", "bbox", "attention_mask", "image"]
    @property
    def feature_extractor_class( self) -> List[str]:
        """simple docstring"""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self) -> int:
        """simple docstring"""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor | 269 |
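# A minimal usage sketch for the LayoutXLMProcessor defined above, assuming
# the pretrained "microsoft/layoutxlm-base" checkpoint, a local document image
# (placeholder path), and Tesseract available for the default apply_ocr=True:
#
#     from PIL import Image
#     from transformers import LayoutXLMProcessor
#
#     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#     image = Image.open("document.png").convert("RGB")
#     encoding = processor(image, return_tensors="pt")
#     print(encoding.keys())  # input_ids, bbox, attention_mask, image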
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class A__ ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_small_integration_test( self) -> None:
        """simple docstring"""
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there" , return_tensors="np").input_ids
        labels = tokenizer("Hi I am" , return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id)

        logits = model(input_ids , decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) | 269 | 1 |
import re
def is_sri_lankan_phone_number( phone: str ) -> bool:
    pattern = re.compile(
        r'^(?:0|94|\+94|0{2}94)' r'7(0|1|2|4|5|6|7|8)' r'(-| |)' r'\d{7}$' )

    return bool(re.search(pattern , phone ) )
if __name__ == "__main__":
    phone = """0094702343221"""
print(is_sri_lankan_phone_number(phone)) | 362 |
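# A few hand-picked checks for is_sri_lankan_phone_number above; the pattern
# accepts a leading 0, 94, +94 or 0094, then 7x with x in {0,1,2,4,5,6,7,8},
# an optional separator, and seven digits.
assert is_sri_lankan_phone_number("0712345678")      # 0 + 71 + 7 digits
assert is_sri_lankan_phone_number("+94773283048")    # +94 prefix
assert is_sri_lankan_phone_number("0094702343221")   # 0094 prefix
assert not is_sri_lankan_phone_number("0731234567")  # 73 is not a valid prefix
assert not is_sri_lankan_phone_number("071234567")   # too short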
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest( unittest.TestCase ):
    @parameterized.expand([(None,), ('foo.json',)] )
    def test_save_load_config( self , config_name ):
        '''simple docstring'''
        config = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir , config_name=config_name )
            loaded_config = GenerationConfig.from_pretrained(tmp_dir , config_name=config_name )

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample , True )
        self.assertEqual(loaded_config.temperature , 0.7 )
        self.assertEqual(loaded_config.length_penalty , 1.0 )
        self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k , 50 )
        self.assertEqual(loaded_config.max_length , 20 )
        self.assertEqual(loaded_config.max_time , None )
    def test_from_model_config( self ):
        '''simple docstring'''
        model_config = AutoConfig.from_pretrained('gpt2' )
        generation_config_from_model = GenerationConfig.from_model_config(model_config )
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model , default_generation_config )

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
        self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
    def test_update( self ):
        '''simple docstring'''
        generation_config = GenerationConfig()
        update_kwargs = {
            'max_new_tokens': 1_024,
            'foo': 'bar',
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs )
        unused_kwargs = generation_config.update(**update_kwargs )

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs , update_kwargs_copy )

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens , 1_024 )

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs , {'foo': 'bar'} )
    def test_initialize_new_kwargs( self ):
        '''simple docstring'''
        generation_config = GenerationConfig()
        generation_config.foo = 'bar'

        with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir:
            generation_config.save_pretrained(tmp_dir )

            new_config = GenerationConfig.from_pretrained(tmp_dir )
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo , 'bar' )

        generation_config = GenerationConfig.from_model_config(new_config )
        assert not hasattr(generation_config , 'foo' )  # no new kwargs should be initialized if from config
    def test_kwarg_init( self ):
        '''simple docstring'''
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature , 1.0 )
        self.assertEqual(default_config.do_sample , False )
        self.assertEqual(default_config.num_beams , 1 )

        config = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
        self.assertEqual(config.temperature , 0.7 )
        self.assertEqual(config.do_sample , True )
        self.assertEqual(config.num_beams , 1 )

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir )
            loaded_config = GenerationConfig.from_pretrained(tmp_dir , temperature=1.0 )

        self.assertEqual(loaded_config.temperature , 1.0 )
        self.assertEqual(loaded_config.do_sample , True )
        self.assertEqual(loaded_config.num_beams , 1 )  # default value
@is_staging_test
class ConfigPushToHubTester( unittest.TestCase ):
    @classmethod
    def setUpClass( cls ):
        '''simple docstring'''
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )

    @classmethod
    def tearDownClass( cls ):
        '''simple docstring'''
        try:
            delete_repo(token=cls._token , repo_id='test-generation-config' )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org' )
        except HTTPError:
            pass
    def test_push_to_hub( self ):
        '''simple docstring'''
        config = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , )
        config.push_to_hub('test-generation-config' , use_auth_token=self._token )

        new_config = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )

        # Reset repo
        delete_repo(token=self._token , repo_id='test-generation-config' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id='test-generation-config' , push_to_hub=True , use_auth_token=self._token )

        new_config = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
    def test_push_to_hub_in_organization( self ):
        '''simple docstring'''
        config = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , )
        config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token )

        new_config = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )

        # Reset repo
        delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id='valid_org/test-generation-config-org' , push_to_hub=True , use_auth_token=self._token )

        new_config = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) ) | 307 | 0 |
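# Outside of tests, a GenerationConfig is typically paired with
# model.generate. A minimal sketch reusing the small gpt2 checkpoint that the
# tests above already rely on:
#
#     from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
#
#     tokenizer = AutoTokenizer.from_pretrained("gpt2")
#     model = AutoModelForCausalLM.from_pretrained("gpt2")
#     generation_config = GenerationConfig(max_new_tokens=20, do_sample=True, temperature=0.7)
#     inputs = tokenizer("Hello", return_tensors="pt")
#     outputs = model.generate(**inputs, generation_config=generation_config)
#     print(tokenizer.decode(outputs[0], skip_special_tokens=True))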
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    '''simple docstring'''
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
            requests.request("""GET""" , """https://huggingface.co""" )
        with pytest.raises(requests.exceptions.ConnectTimeout ):
            requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )


@pytest.mark.integration
def test_offline_with_connection_error():
    '''simple docstring'''
    with offline(OfflineSimulationMode.CONNECTION_FAILS ):
        with pytest.raises(requests.exceptions.ConnectionError ):
            requests.request("""GET""" , """https://huggingface.co""" )


def test_offline_with_datasets_offline_mode_enabled():
    '''simple docstring'''
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
            http_head("""https://huggingface.co""" )
| 120 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = '''__test_patch_submodule_mock__'''
    with patch_submodule(_test_patching , '''os.path.join''' , mock ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
assert _test_patching.open is open
    mock = '''__test_patch_submodule_builtin_mock__'''
# _test_patching has "open" in its globals
assert _test_patching.open is open
    with patch_submodule(_test_patching , '''open''' , mock ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = '''__test_patch_submodule_missing_mock__'''
    with patch_submodule(_test_patching , '''pandas.read_csv''' , mock ):
pass
def test_patch_submodule_missing_builtin():
    # builtin should always be mocked even if they're not in the globals
    # in case they're loaded at one point
    mock = '''__test_patch_submodule_missing_builtin_mock__'''
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching , '''len''' , None ) is None
    with patch_submodule(_test_patching , '''len''' , mock ):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = '''__test_patch_submodule_start_and_stop_mock__'''
    patch = patch_submodule(_test_patching , '''open''' , mock )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = '''__test_patch_submodule_successive_join__'''
    mock_dirname = '''__test_patch_submodule_successive_dirname__'''
    mock_rename = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching , '''os.path.join''' , mock_join ):
        with patch_submodule(_test_patching , '''os.rename''' , mock_rename ):
            with patch_submodule(_test_patching , '''os.path.dirname''' , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching , '''os.rename''' , mock_rename ):
        with patch_submodule(_test_patching , '''os.path.join''' , mock_join ):
            with patch_submodule(_test_patching , '''os.path.dirname''' , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = '''__test_patch_submodule_doesnt_exist_mock__'''
    with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , mock ):
        pass
    with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , mock ):
pass
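# For intuition: a rough standard-library analogue that patches a single
# attribute on a real object (patch_submodule additionally wraps submodule
# chains like "os.path.join" in _PatchedModuleObj objects).
def test_patch_object_analogue():
    import os
    from unittest import mock as std_mock

    with std_mock.patch.object(os.path , "join" , lambda *parts: "__mocked__" ):
        assert os.path.join("a" , "b" ) == "__mocked__"
    assert os.path.join("a" , "b" ) != "__mocked__"  # restored on exit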
| 24 | 0 |
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class XLMProphetNetTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""

    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self : int):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id( self : Tuple):
        token = '[PAD]'
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token)
    def test_get_vocab( self : Any):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0] , '[PAD]')
        self.assertEqual(vocab_keys[1] , '[CLS]')
        self.assertEqual(vocab_keys[-1] , 'j')
        self.assertEqual(len(vocab_keys) , 1012)
    def test_vocab_size( self : Union[str, Any]):
        self.assertEqual(self.get_tokenizer().vocab_size , 1012)
    def test_full_tokenizer( self : Optional[int]):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
] , )
    @cached_property
    def big_tokenizer( self : List[Any]):
        return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased')
@slow
    def test_tokenization_base_easy_symbols( self : Optional[int]):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [35389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration( self : Union[str, Any]):
# fmt: off
_A : Tuple = {'input_ids': [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE , model_name='microsoft/xprophetnet-large-wiki100-cased' , revision='1acad1643ddd54a44df6a1b797ada8373685d90e' , )
| 366 |
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __lowerCamelCase ( a_ , a_ , a_ , unittest.TestCase ):
"""simple docstring"""
a = StableDiffusionControlNetImgaImgPipeline
a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
a = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self : Tuple):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'controlnet': controlnet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs( self : Union[str, Any] , device : Any , seed : Tuple=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device) , )
        image = floats_tensor(control_image.shape , rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0 , 2 , 3 , 1)[0]
        image = Image.fromarray(np.uinta(image)).convert('RGB').resize((64, 64))
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'image': image,
            'control_image': control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass( self : str):
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass( self : Tuple):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)
    def test_inference_batch_single_identical( self : int):
self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class __lowerCamelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
a = StableDiffusionControlNetImgaImgPipeline
a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components( self : List[str]):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        torch.manual_seed(0)
        def init_weights(m):
            if isinstance(m , torch.nn.Convad):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)
        controlnet_a = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet_a.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet_b = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet_b.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        controlnet = MultiControlNetModel([controlnet_a, controlnet_b])
        components = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
    def get_dummy_inputs( self : Optional[int] , device : Dict , seed : List[str]=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device) , ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device) , ),
        ]
        image = floats_tensor(control_image[0].shape , rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0 , 2 , 3 , 1)[0]
        image = Image.fromarray(np.uinta(image)).convert('RGB').resize((64, 64))
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'image': image,
            'control_image': control_image,
        }
        return inputs
    def test_control_guidance_switch( self : Tuple):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device)
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_a = pipe(**inputs)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_b = pipe(**inputs , control_guidance_start=0.1 , control_guidance_end=0.2)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_c = pipe(**inputs , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7])[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_d = pipe(**inputs , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8])[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_a - output_b)) > 1e-3
        assert np.sum(np.abs(output_a - output_c)) > 1e-3
        assert np.sum(np.abs(output_a - output_d)) > 1e-3
    def test_attention_slicing_forward_pass( self : Optional[Any]):
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass( self : Any):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)
    def test_inference_batch_single_identical( self : Dict):
self._test_inference_batch_single_identical(expected_max_diff=2e-3)
    def test_save_pretrained_raise_not_implemented_exception( self : str):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
except NotImplementedError:
pass
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self : Optional[Any]):
        super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_canny( self : Any):
        controlnet = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny')
        pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , safety_checker=None , controlnet=controlnet)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='cpu').manual_seed(0)
        prompt = 'evil space-punk bird'
        control_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png').resize((512, 512))
        image = load_image(
            'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png').resize((512, 512))
        output = pipe(
            prompt , image , control_image=control_image , generator=generator , output_type='np' , num_inference_steps=50 , strength=0.6 , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy')
        assert np.abs(expected_image - image).max() < 9e-2
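# The integration checks above all follow the same pattern: decode an image,
# then compare it against a stored reference with a max-absolute-difference
# tolerance. A minimal, self-contained sketch of that comparison using
# hypothetical arrays:
import numpy as _np

_reference = _np.zeros((512, 512, 3))
_candidate = _reference + 1e-3  # stand-in for a freshly generated image
assert _np.abs(_reference - _candidate).max() < 9e-2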
| 227 | 0 |
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def _create_iam_role_for_sagemaker( role_name ):
    """simple docstring"""
    iam_client = botoa.client('''iam''' )
    sagemaker_trust_policy = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{'''Effect''': '''Allow''', '''Principal''': {'''Service''': '''sagemaker.amazonaws.com'''}, '''Action''': '''sts:AssumeRole'''}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
            RoleName=role_name , AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy , indent=2 ) )
        policy_document = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{
'''Effect''': '''Allow''',
'''Action''': [
'''sagemaker:*''',
'''ecr:GetDownloadUrlForLayer''',
'''ecr:BatchGetImage''',
'''ecr:BatchCheckLayerAvailability''',
'''ecr:GetAuthorizationToken''',
'''cloudwatch:PutMetricData''',
'''cloudwatch:GetMetricData''',
'''cloudwatch:GetMetricStatistics''',
'''cloudwatch:ListMetrics''',
'''logs:CreateLogGroup''',
'''logs:CreateLogStream''',
'''logs:DescribeLogStreams''',
'''logs:PutLogEvents''',
'''logs:GetLogEvents''',
'''s3:CreateBucket''',
'''s3:ListBucket''',
'''s3:GetBucketLocation''',
'''s3:GetObject''',
'''s3:PutObject''',
],
'''Resource''': '''*''',
}
],
}
# attach policy to role
iam_client.put_role_policy(
            RoleName=role_name , PolicyName=F"""{role_name}_policy_permission""" , PolicyDocument=json.dumps(policy_document , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F"""role {role_name} already exists. Using existing one""" )
def _get_iam_role_arn( role_name ):
    """simple docstring"""
    iam_client = botoa.client('''iam''' )
    return iam_client.get_role(RoleName=role_name )["Role"]["Arn"]
def get_sagemaker_input( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = _ask_options(
'''How do you want to authorize?''' , ['''AWS Profile''', '''Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '''] , _A , )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if credentials_configuration == 0:
SCREAMING_SNAKE_CASE : Tuple = _ask_field('''Enter your AWS Profile name: [default] ''' , default='''default''' )
SCREAMING_SNAKE_CASE : int = aws_profile
else:
print(
'''Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,'''
'''`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`''' )
SCREAMING_SNAKE_CASE : List[Any] = _ask_field('''AWS Access Key ID: ''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = aws_access_key_id
SCREAMING_SNAKE_CASE : Tuple = _ask_field('''AWS Secret Access Key: ''' )
SCREAMING_SNAKE_CASE : Dict = aws_secret_access_key
SCREAMING_SNAKE_CASE : int = _ask_field('''Enter your AWS Region: [us-east-1]''' , default='''us-east-1''' )
SCREAMING_SNAKE_CASE : int = aws_region
SCREAMING_SNAKE_CASE : str = _ask_options(
'''Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?''' , ['''Provide IAM Role name''', '''Create new IAM role using credentials'''] , _A , )
    if role_management == 0:
        iam_role_name = _ask_field('''Enter your IAM role name: ''' )
    else:
        iam_role_name = '''accelerate_sagemaker_execution_role'''
        print(F"""Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials""" )
        _create_iam_role_for_sagemaker(iam_role_name )
SCREAMING_SNAKE_CASE : List[Any] = _ask_field(
'''Do you want to use custom Docker image? [yes/NO]: ''' , _convert_yes_no_to_bool , default=_A , error_message='''Please enter yes or no.''' , )
SCREAMING_SNAKE_CASE : List[str] = None
if is_custom_docker_image:
SCREAMING_SNAKE_CASE : List[str] = _ask_field('''Enter your Docker image: ''' , lambda a__ : str(_A ).lower() )
SCREAMING_SNAKE_CASE : Dict = _ask_field(
'''Do you want to provide SageMaker input channels with data locations? [yes/NO]: ''' , _convert_yes_no_to_bool , default=_A , error_message='''Please enter yes or no.''' , )
SCREAMING_SNAKE_CASE : List[str] = None
if is_sagemaker_inputs_enabled:
SCREAMING_SNAKE_CASE : List[Any] = _ask_field(
'''Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ''' , lambda a__ : str(_A ).lower() , )
SCREAMING_SNAKE_CASE : int = _ask_field(
'''Do you want to enable SageMaker metrics? [yes/NO]: ''' , _convert_yes_no_to_bool , default=_A , error_message='''Please enter yes or no.''' , )
SCREAMING_SNAKE_CASE : List[str] = None
if is_sagemaker_metrics_enabled:
SCREAMING_SNAKE_CASE : Dict = _ask_field(
'''Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ''' , lambda a__ : str(_A ).lower() , )
SCREAMING_SNAKE_CASE : str = _ask_options(
'''What is the distributed mode?''' , ['''No distributed training''', '''Data parallelism'''] , _convert_sagemaker_distributed_mode , )
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
SCREAMING_SNAKE_CASE : List[str] = _ask_field(
'''Do you wish to optimize your script with torch dynamo?[yes/NO]:''' , _convert_yes_no_to_bool , default=_A , error_message='''Please enter yes or no.''' , )
if use_dynamo:
SCREAMING_SNAKE_CASE : Dict = '''dynamo_'''
SCREAMING_SNAKE_CASE : str = _ask_options(
'''Which dynamo backend would you like to use?''' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
SCREAMING_SNAKE_CASE : str = _ask_field(
'''Do you want to customize the defaults sent to torch.compile? [yes/NO]: ''' , _convert_yes_no_to_bool , default=_A , error_message='''Please enter yes or no.''' , )
if use_custom_options:
SCREAMING_SNAKE_CASE : List[str] = _ask_options(
'''Which mode do you want to use?''' , _A , lambda a__ : TORCH_DYNAMO_MODES[int(_A )] , default='''default''' , )
SCREAMING_SNAKE_CASE : Dict = _ask_field(
'''Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ''' , _convert_yes_no_to_bool , default=_A , error_message='''Please enter yes or no.''' , )
SCREAMING_SNAKE_CASE : Any = _ask_field(
'''Do you want to enable dynamic shape tracing? [yes/NO]: ''' , _convert_yes_no_to_bool , default=_A , error_message='''Please enter yes or no.''' , )
SCREAMING_SNAKE_CASE : Optional[int] = '''Which EC2 instance type you want to use for your training?'''
if distributed_type != SageMakerDistributedType.NO:
SCREAMING_SNAKE_CASE : List[Any] = _ask_options(
_A , _A , lambda a__ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(_A )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
SCREAMING_SNAKE_CASE : str = _ask_field(_A , lambda a__ : str(_A ).lower() , default='''ml.p3.2xlarge''' )
SCREAMING_SNAKE_CASE : str = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
SCREAMING_SNAKE_CASE : Union[str, Any] = _ask_field(
'''How many machines do you want use? [1]: ''' , _A , default=1 , )
SCREAMING_SNAKE_CASE : Union[str, Any] = _ask_options(
'''Do you wish to use FP16 or BF16 (mixed precision)?''' , ['''no''', '''fp16''', '''bf16''', '''fp8'''] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
'''Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.''' )
return SageMakerConfig(
image_uri=_A , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=_A , use_cpu=_A , dynamo_config=_A , eca_instance_type=_A , profile=_A , region=_A , iam_role_name=_A , mixed_precision=_A , num_machines=_A , sagemaker_inputs_file=_A , sagemaker_metrics_file=_A , )
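# A minimal sketch of the `_ask_field` prompting pattern used throughout the
# function above. This is an assumption for illustration, not accelerate's
# actual helper: the real one also re-prompts with `error_message` when the
# converter raises.
def ask_field_sketch(prompt , convert=str , default=None):
    raw = input(prompt)
    if not raw and default is not None:
        return default
    return convert(raw)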
| 313 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a__( unittest.TestCase ):
'''simple docstring'''
@property
    def dummy_uncond_unet( self):
        """simple docstring"""
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
        return model
    def test_inference( self):
        """simple docstring"""
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet , scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator , num_inference_steps=20 , output_type="""numpy""").images
        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator , num_inference_steps=20 , output_type="""numpy""" , return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch
class a__( unittest.TestCase ):
'''simple docstring'''
    def test_inference_cifar10( self):
        """simple docstring"""
        model_id = """google/ddpm-cifar10-32"""
        unet = UNetaDModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet , scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator , output_type="""numpy""").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
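# Both tests above rely on reseeding torch's global generator so that two
# sampling runs are bit-for-bit comparable. A minimal illustration of that
# determinism pattern:
import torch as _torch

_torch.manual_seed(0)
_first = _torch.randn(3)
_torch.manual_seed(0)
_second = _torch.randn(3)
assert _torch.equal(_first , _second)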
| 272 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config( model_name : List[Any] ) -> Optional[int]:
    '''simple docstring'''
    config = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 1_2_8
    elif "12-12" in model_name:
        config.time_stride = 1_2
        config.frequency_stride = 1_2
    elif "14-14" in model_name:
        config.time_stride = 1_4
        config.frequency_stride = 1_4
    elif "16-16" in model_name:
        config.time_stride = 1_6
        config.frequency_stride = 1_6
    else:
        raise ValueError("""Model not supported""" )
    repo_id = """huggingface/label-files"""
    if "speech-commands" in model_name:
        config.num_labels = 3_5
        filename = """speech-commands-v2-id2label.json"""
    else:
        config.num_labels = 5_2_7
        filename = """audioset-id2label.json"""
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def rename_key( name : int ) -> int:
'''simple docstring'''
if "module.v" in name:
lowerCAmelCase_ :Optional[int] = name.replace("""module.v""" , """audio_spectrogram_transformer""" )
if "cls_token" in name:
lowerCAmelCase_ :Tuple = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "dist_token" in name:
lowerCAmelCase_ :Tuple = name.replace("""dist_token""" , """embeddings.distillation_token""" )
if "pos_embed" in name:
lowerCAmelCase_ :Union[str, Any] = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
lowerCAmelCase_ :List[Any] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
# transformer blocks
if "blocks" in name:
lowerCAmelCase_ :List[str] = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
lowerCAmelCase_ :Dict = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
lowerCAmelCase_ :str = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowerCAmelCase_ :str = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowerCAmelCase_ :Any = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowerCAmelCase_ :List[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowerCAmelCase_ :Tuple = name.replace("""mlp.fc2""" , """output.dense""" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
lowerCAmelCase_ :Any = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" )
# classifier head
if "module.mlp_head.0" in name:
lowerCAmelCase_ :Tuple = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" )
if "module.mlp_head.1" in name:
lowerCAmelCase_ :Tuple = name.replace("""module.mlp_head.1""" , """classifier.dense""" )
return name
def convert_state_dict( orig_state_dict , config ) -> List[str]:
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split(""".""" )
            layer_num = int(key_split[3] )
            dim = config.hidden_size
if "weight" in key:
lowerCAmelCase_ :Optional[Any] = val[:dim, :]
lowerCAmelCase_ :Optional[Any] = val[dim : dim * 2, :]
lowerCAmelCase_ :str = val[-dim:, :]
else:
lowerCAmelCase_ :str = val[:dim]
lowerCAmelCase_ :str = val[dim : dim * 2]
lowerCAmelCase_ :Tuple = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
return orig_state_dict
def remove_keys( state_dict ) -> List[Any]:
    '''simple docstring'''
    ignore_keys = [
        """module.v.head.weight""",
        """module.v.head.bias""",
        """module.v.head_dist.weight""",
        """module.v.head_dist.bias""",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint( model_name : int , pytorch_dump_folder_path : List[str] , push_to_hub : int=False ) -> Union[str, Any]:
    '''simple docstring'''
    config = get_audio_spectrogram_transformer_config(model_name )
    model_name_to_url = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )
    # remove some keys
    remove_keys(state_dict )
    # rename some keys
    new_state_dict = convert_state_dict(state_dict , config )
    # load 🤗 model
    model = ASTForAudioClassification(config )
    model.eval()
    model.load_state_dict(new_state_dict )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if """speech-commands""" not in model_name else -6.845978
    std = 4.5689974 if """speech-commands""" not in model_name else 5.5654526
    max_length = 1_0_2_4 if """speech-commands""" not in model_name else 1_2_8
    feature_extractor = ASTFeatureExtractor(mean=mean , std=std , max_length=max_length )
    if "speech-commands" in model_name:
        dataset = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" )
        waveform = dataset[0]["""audio"""]["""array"""]
    else:
        filepath = hf_hub_download(
            repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , )
        waveform, _ = torchaudio.load(filepath )
        waveform = waveform.squeeze().numpy()
    inputs = feature_extractor(waveform , sampling_rate=1_6_0_0_0 , return_tensors="""pt""" )
    # forward pass
    outputs = model(**inputs )
    logits = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
lowerCAmelCase_ :Dict = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
lowerCAmelCase_ :Optional[int] = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
lowerCAmelCase_ :Optional[int] = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
lowerCAmelCase_ :Union[str, Any] = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
lowerCAmelCase_ :List[Any] = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
lowerCAmelCase_ :List[str] = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
lowerCAmelCase_ :int = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
lowerCAmelCase_ :Any = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError("""Unknown model name""" )
    if not torch.allclose(logits[0, :3] , expected_slice , atol=1E-4 ):
raise ValueError("""Logits don't match""" )
print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"""Saving feature extractor to {pytorch_dump_folder_path}""" )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print("""Pushing model and feature extractor to the hub...""" )
model.push_to_hub(f"""MIT/{model_name}""" )
feature_extractor.push_to_hub(f"""MIT/{model_name}""" )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='ast-finetuned-audioset-10-10-0.4593',
type=str,
help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__UpperCAmelCase = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
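# The qkv handling in `convert_state_dict` above slices a fused attention
# projection into query/key/value chunks along dim 0. A standalone sketch of
# that split with a hypothetical hidden size of 4:
import numpy as _np

_dim = 4
_qkv_weight = _np.arange(3 * _dim * _dim).reshape(3 * _dim , _dim)
_q, _k, _v = _qkv_weight[:_dim, :], _qkv_weight[_dim : _dim * 2, :], _qkv_weight[-_dim:, :]
assert _q.shape == _k.shape == _v.shape == (_dim, _dim)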
| 364 |
"""simple docstring"""
def solution( length : int = 5_0 ) -> int:
    '''simple docstring'''
    ways_number = [1] * (length + 1)
    for row_length in range(3 , length + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
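# Sanity check taken from the worked example in Project Euler problem 114:
# a row measuring seven units admits exactly seventeen arrangements.
assert solution(7) == 17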
if __name__ == "__main__":
print(F"""{solution() = }""")
| 1 | 0 |
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
snake_case__ : List[Any] = logging.get_logger(__name__)
snake_case__ : Optional[int] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
snake_case__ : str = {
'''vocab_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json''',
},
'''merges_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''Salesforce/codegen-350M-mono''': (
'''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'''
),
},
}
snake_case__ : Union[str, Any] = {
'''Salesforce/codegen-350M-mono''': 2_048,
}
class snake_case_( a__ ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['''input_ids''', '''attention_mask''']
__UpperCamelCase = CodeGenTokenizer
    def __init__( self : Optional[int] , vocab_file : List[str]=None , merges_file : Optional[Any]=None , tokenizer_file : List[str]=None , unk_token : Tuple="<|endoftext|>" , bos_token : int="<|endoftext|>" , eos_token : Tuple="<|endoftext|>" , add_prefix_space : Optional[int]=False , **kwargs : Dict , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        if kwargs.pop('''add_bos_token''' , False ):
            model_id = kwargs.pop('''name_or_path''' , '''''' )
            raise ValueError(
                '''Currently GPT2\'s fast tokenizer does NOT support adding a BOS token.'''
                '''Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'''
                F'''`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n'''
                F'''`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n'''
                '''This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'''
                ''' so that the fast tokenizer works correctly.''' )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus( self : int , *args : Union[str, Any] , **kwargs : Any ):
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self : Optional[int] , *args : Optional[Any] , **kwargs : int ):
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self : int , save_directory : str , filename_prefix : Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def decode( self : List[str] , token_ids : Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] , skip_special_tokens : bool = False , clean_up_tokenization_spaces : bool = None , truncate_before_pattern : Optional[List[str]] = None , **kwargs : Tuple , ):
        decoded_text = super().decode(
            token_ids=token_ids , skip_special_tokens=skip_special_tokens , clean_up_tokenization_spaces=clean_up_tokenization_spaces , **kwargs , )
        if truncate_before_pattern is not None and len(truncate_before_pattern ) > 0:
            decoded_text = self.truncate(decoded_text , truncate_before_pattern )
        return decoded_text
    def truncate( self : Tuple , completion : Union[str, Any] , truncate_before_pattern : Optional[Any] ):
        def find_re(string : Optional[Any] , pattern : Optional[Any] , start_pos : List[str] ):
            m = pattern.search(string , start_pos )
            return m.start() if m else -1
        terminals = [re.compile(pattern , re.MULTILINE ) for pattern in truncate_before_pattern]
        prints = list(re.finditer('''^print''' , completion , re.MULTILINE ) )
        if len(prints ) > 1:
            completion = completion[: prints[1].start()]
        defs = list(re.finditer('''^def''' , completion , re.MULTILINE ) )
        if len(defs ) > 1:
            completion = completion[: defs[1].start()]
        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion , terminal , start_pos ) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos ) > 0:
            return completion[: min(terminals_pos )]
        else:
            return completion
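# A standalone sketch of the truncation logic above: cut a completion at the
# second top-level `print` so only the first statement block survives.
import re as _re

_completion = "def f():\n    return 1\nprint(f())\nprint('extra')\n"
_prints = list(_re.finditer("^print" , _completion , _re.MULTILINE))
if len(_prints) > 1:
    _completion = _completion[: _prints[1].start()]
assert _completion.endswith("print(f())\n")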
| 60 |
'''simple docstring'''
def __lowerCAmelCase ( txt ):
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt ) )
        if txt[a].isalpha()
    ]
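# Expected behaviour of the helper above, checked by hand: every variant of
# the input with exactly one alphabetic character upper-cased.
assert __lowerCAmelCase('''abc''') == ['''Abc''', '''aBc''', '''abC''']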
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 298 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , ) -> Dict:
        '''simple docstring'''
        size = size if size is not None else {'shortest_edge': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
    def prepare_image_processor_dict( self ) -> Any:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = MobileNetVaImageProcessor if is_vision_available() else None
    def setUp( self ) -> Any:
        '''simple docstring'''
        self.image_processor_tester = MobileNetVaImageProcessingTester(self )
@property
    def image_processor_dict( self ) -> Tuple:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> Tuple:
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
        self.assertTrue(hasattr(image_processing , 'do_center_crop' ) )
        self.assertTrue(hasattr(image_processing , 'crop_size' ) )
    def test_image_processor_from_dict_with_kwargs( self ) -> Optional[Any]:
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 20} )
        self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def lowercase_ ( self ) -> int:
'''simple docstring'''
pass
    def test_call_pil( self ) -> Optional[int]:
        '''simple docstring'''
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
    def test_call_numpy( self ) -> Dict:
        '''simple docstring'''
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
    def test_call_pytorch( self ) -> List[Any]:
        '''simple docstring'''
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 357 |
import sys
from collections import defaultdict
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = []
def lowercase_ ( self , lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
return self.node_position[vertex]
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = pos
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__lowerCamelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__lowerCamelCase = 2 * start + 1
else:
__lowerCamelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
__lowerCamelCase , __lowerCamelCase = heap[smallest_child], positions[smallest_child]
__lowerCamelCase , __lowerCamelCase = (
heap[start],
positions[start],
)
__lowerCamelCase , __lowerCamelCase = temp, tempa
__lowerCamelCase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , lowerCamelCase__ )
self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = position[index]
while index != 0:
__lowerCamelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__lowerCamelCase = heap[parent]
__lowerCamelCase = position[parent]
self.set_position(position[parent] , lowerCamelCase__ )
else:
__lowerCamelCase = val
__lowerCamelCase = temp
self.set_position(lowerCamelCase__ , lowerCamelCase__ )
break
__lowerCamelCase = parent
else:
__lowerCamelCase = val
__lowerCamelCase = temp
self.set_position(lowerCamelCase__ , 0 )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase = len(lowerCamelCase__ ) // 2 - 1
for i in range(lowerCamelCase__ , -1 , -1 ):
self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , len(lowerCamelCase__ ) , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = positions[0]
__lowerCamelCase = sys.maxsize
self.top_to_bottom(lowerCamelCase__ , 0 , len(lowerCamelCase__ ) , lowerCamelCase__ )
return temp
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = Heap()
__lowerCamelCase = [0] * len(UpperCamelCase__ )
__lowerCamelCase = [-1] * len(UpperCamelCase__ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
__lowerCamelCase = [] # Heap of Distance of vertices from their neighboring vertex
__lowerCamelCase = []
for vertex in range(len(UpperCamelCase__ ) ):
distance_tv.append(sys.maxsize )
positions.append(UpperCamelCase__ )
heap.node_position.append(UpperCamelCase__ )
__lowerCamelCase = []
__lowerCamelCase = 1
__lowerCamelCase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__lowerCamelCase = 0
__lowerCamelCase = distance
heap.heapify(UpperCamelCase__ , UpperCamelCase__ )
for _ in range(1 , len(UpperCamelCase__ ) ):
__lowerCamelCase = heap.delete_minimum(UpperCamelCase__ , UpperCamelCase__ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
__lowerCamelCase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCamelCase__ )]
):
__lowerCamelCase = distance
heap.bottom_to_top(
UpperCamelCase__ , heap.get_position(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = vertex
return tree_edges
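# A tiny worked example of the routine above: in a triangle graph with edge
# weights 1, 2 and 3, the minimum spanning tree keeps the two cheapest edges.
_example = defaultdict(list)
for _u, _v, _w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
    _example[_u].append([_v, _w])
    _example[_v].append([_u, _w])
assert sorted(prisms_algorithm(_example)) == [(0, 1), (1, 2)]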
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__A = int(input("Enter number of edges: ").strip())
__A = defaultdict(list)
for _ in range(edges_number):
__A = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 348 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_x_clip""": [
"""XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XCLIPConfig""",
"""XCLIPTextConfig""",
"""XCLIPVisionConfig""",
],
"""processing_x_clip""": ["""XCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_x_clip"""] = [
"""XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XCLIPModel""",
"""XCLIPPreTrainedModel""",
"""XCLIPTextModel""",
"""XCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
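# A minimal, standard-library-only sketch of the lazy-module idea used above
# (an illustration, not transformers' actual `_LazyModule`): defer the real
# import until an attribute is first accessed.
import importlib
import types


class _LazySketch(types.ModuleType):
    def __init__(self , name , target):
        super().__init__(name)
        self._target = target  # dotted name of the module to import lazily

    def __getattr__(self , attr):
        module = importlib.import_module(self._target)
        return getattr(module , attr)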
| 91 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""XGLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""XGLMTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_xglm"""] = [
"""XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XGLMForCausalLM""",
"""XGLMModel""",
"""XGLMPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_xglm"""] = [
"""FlaxXGLMForCausalLM""",
"""FlaxXGLMModel""",
"""FlaxXGLMPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_xglm"""] = [
"""TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXGLMForCausalLM""",
"""TFXGLMModel""",
"""TFXGLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 59 | 0 |
'''simple docstring'''
def solution():
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
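# The leap-year condition above is the Gregorian rule; a quick check of the
# century cases it has to get right:
for _year, _is_leap in [(1900, False), (2000, True), (1996, True)]:
    assert ((_year % 4 == 0 and _year % 100 != 0) or (_year % 400 == 0)) == _is_leap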
if __name__ == "__main__":
print(solution())
| 367 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@require_torch
    def test_small_model_pt( self ) -> int:
        audio_classifier = pipeline(
            task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''' )
        dataset = load_dataset('''ashraq/esc50''' )
        audio = dataset['''train''']['''audio'''][-1]['''array''']
        output = audio_classifier(audio , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
        self.assertEqual(
            nested_simplify(output ) , [{'''score''': 0.5_0_1, '''label''': '''Sound of a dog'''}, {'''score''': 0.4_9_9, '''label''': '''Sound of vaccum cleaner'''}] , )
@unittest.skip('''No models are available in TF''' )
def _lowerCAmelCase( self ) -> str:
pass
@slow
@require_torch
def _lowerCAmelCase( self ) -> Tuple:
lowercase__ : List[str] = pipeline(
task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
# This is an audio of a dog
lowercase__ : int = load_dataset('''ashraq/esc50''' )
lowercase__ : str = dataset['''train''']['''audio'''][-1]['''array''']
lowercase__ : Any = audio_classifier(__lowerCAmelCase , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
] , )
lowercase__ : Dict = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
lowercase__ : Any = audio_classifier(
[audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
@unittest.skip('''No models are available in TF''' )
def _lowerCAmelCase( self ) -> Union[str, Any]:
pass
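# Usage sketch (added for illustration, mirroring test_large_model_pt above).
# Running it downloads the laion/clap-htsat-unfused checkpoint and the ESC-50
# dataset, and scores may vary slightly across library versions:
#
#     classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#     audio = load_dataset("ashraq/esc50")["train"]["audio"][-1]["array"]
#     classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
#     # -> [{"score": ~0.999, "label": "Sound of a dog"},
#     #     {"score": ~0.001, "label": "Sound of vaccum cleaner"}]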
| 214 | 0 |
'''simple docstring'''
import os
def solution() -> int:
    """Returns the total of all the name scores in p022_names.txt
    (Project Euler problem 22)."""
    with open(os.path.dirname(__file__) + """/p022_names.txt""") as file:
        names = str(file.readlines()[0])
        names = names.replace("""\"""", """""").split(""",""")
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0  # reset before scoring the next name
    return total_score
if __name__ == "__main__":
print(solution())
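# Worked example (added for illustration, from the Project Euler statement):
# the alphabetical value of "COLIN" is 3 + 15 + 12 + 9 + 14 = 53, and at
# position 938 in the sorted list it contributes 938 * 53 = 49714 to the total.
assert sum(ord(letter) - 64 for letter in "COLIN") == 53
assert 938 * 53 == 49714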
| 35 |
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, F"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(F"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
__UpperCamelCase : Any = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[str] = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_flax_glue.main()
__UpperCamelCase : int = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
@slow
    def test_run_clm(self):
__UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Any = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_clm_flax.main()
__UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase )
self.assertLess(result["eval_perplexity"] , 1_0_0 )
@slow
    def test_run_summarization(self):
__UpperCamelCase : Any = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Tuple = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_summarization_flax.main()
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 1_0 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
    def test_run_mlm(self):
__UpperCamelCase : int = self.get_auto_remove_tmp_dir()
__UpperCamelCase : str = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_mlm_flax.main()
__UpperCamelCase : Optional[Any] = get_results(_UpperCAmelCase )
self.assertLess(result["eval_perplexity"] , 4_2 )
@slow
    def test_run_t5_mlm(self):
__UpperCamelCase : Dict = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Tuple = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
            run_t5_mlm_flax.main()
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.42 )
@slow
    def test_run_ner(self):
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
__UpperCamelCase : Union[str, Any] = 7 if get_gpu_count() > 1 else 2
__UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Optional[Any] = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_flax_ner.main()
__UpperCamelCase : int = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
    def test_run_qa(self):
__UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Dict = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split()
with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
run_qa.main()
__UpperCamelCase : List[Any] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_f1"] , 3_0 )
self.assertGreaterEqual(result["eval_exact"] , 3_0 )
| 298 | 0 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
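# Usage sketch (added for illustration): in practice this reader is reached
# through the public API rather than instantiated directly, e.g.
#
#     from datasets import load_dataset
#     ds = load_dataset("text", data_files={"train": "corpus.txt"})["train"]
#     ds[0]  # -> {"text": "<first line of corpus.txt>"}
#
# where "corpus.txt" is a placeholder path and each input line becomes one example.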
| 51 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        if safety_checker is None:
            logger.warning(
                F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                ''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
                ''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
                ''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
                ''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
                ''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
        self.register_modules(
            speech_model=speech_model, speech_processor=speech_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)
    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
'''simple docstring'''
UpperCamelCase__ : int = self.speech_processor.feature_extractor(
lowerCamelCase__ , return_tensors='''pt''' , sampling_rate=lowerCamelCase__ ).input_features.to(self.device )
UpperCamelCase__ : str = self.speech_model.generate(lowerCamelCase__ , max_length=480000 )
UpperCamelCase__ : Dict = self.speech_processor.tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ , normalize=lowerCamelCase__ )[
0
]
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
UpperCamelCase__ : Optional[Any] = 1
elif isinstance(lowerCamelCase__ , lowerCamelCase__ ):
UpperCamelCase__ : Union[str, Any] = len(lowerCamelCase__ )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase__ )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCamelCase__ , lowerCamelCase__ ) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(lowerCamelCase__ )}." )
# get prompt text embeddings
UpperCamelCase__ : int = self.tokenizer(
lowerCamelCase__ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
UpperCamelCase__ : str = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase__ : Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
UpperCamelCase__ : List[str] = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCamelCase__ : str = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : str = text_embeddings.shape
UpperCamelCase__ : List[Any] = text_embeddings.repeat(1 , lowerCamelCase__ , 1 )
UpperCamelCase__ : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , lowerCamelCase__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase__ : List[str] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase__ : List[str]
if negative_prompt is None:
UpperCamelCase__ : Tuple = [''''''] * batch_size
elif type(lowerCamelCase__ ) is not type(lowerCamelCase__ ):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase__ )} !="
F" {type(lowerCamelCase__ )}." )
elif isinstance(lowerCamelCase__ , lowerCamelCase__ ):
UpperCamelCase__ : str = [negative_prompt]
elif batch_size != len(lowerCamelCase__ ):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase__ )}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
''' the batch size of `prompt`.''' )
else:
UpperCamelCase__ : Any = negative_prompt
UpperCamelCase__ : Any = text_input_ids.shape[-1]
UpperCamelCase__ : Optional[int] = self.tokenizer(
lowerCamelCase__ , padding='''max_length''' , max_length=lowerCamelCase__ , truncation=lowerCamelCase__ , return_tensors='''pt''' , )
UpperCamelCase__ : int = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase__ : List[str] = uncond_embeddings.shape[1]
UpperCamelCase__ : Optional[int] = uncond_embeddings.repeat(1 , lowerCamelCase__ , 1 )
UpperCamelCase__ : Optional[int] = uncond_embeddings.view(batch_size * num_images_per_prompt , lowerCamelCase__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase__ : int = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase__ : List[str] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase__ : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCamelCase__ : Union[str, Any] = torch.randn(lowerCamelCase__ , generator=lowerCamelCase__ , device='''cpu''' , dtype=lowerCamelCase__ ).to(
self.device )
else:
UpperCamelCase__ : int = torch.randn(lowerCamelCase__ , generator=lowerCamelCase__ , device=self.device , dtype=lowerCamelCase__ )
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
UpperCamelCase__ : Dict = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(lowerCamelCase__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
UpperCamelCase__ : Optional[int] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase__ : Optional[int] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase__ : Optional[int] = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase__ : Tuple = {}
if accepts_eta:
UpperCamelCase__ : List[Any] = eta
for i, t in enumerate(self.progress_bar(lowerCamelCase__ ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase__ : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase__ : int = self.scheduler.scale_model_input(lowerCamelCase__ , lowerCamelCase__ )
# predict the noise residual
UpperCamelCase__ : Optional[Any] = self.unet(lowerCamelCase__ , lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ ).sample
# perform guidance
if do_classifier_free_guidance:
UpperCamelCase__ , UpperCamelCase__ : List[Any] = noise_pred.chunk(2 )
UpperCamelCase__ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase__ : List[Any] = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ : str = 1 / 0.1_8215 * latents
UpperCamelCase__ : Optional[int] = self.vae.decode(lowerCamelCase__ ).sample
UpperCamelCase__ : Tuple = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase__ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCamelCase__ : int = self.numpy_to_pil(lowerCamelCase__ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowerCamelCase__ , nsfw_content_detected=lowerCamelCase__ )
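if __name__ == "__main__":
    # Usage sketch (added for illustration): this mirrors the diffusers community
    # pipeline docs for `speech_to_image_diffusion`. The checkpoint names and the
    # dummy-audio dataset below are taken from memory and should be verified against
    # those docs; running this downloads several large models and realistically
    # needs a GPU.
    from datasets import load_dataset

    audio_sample = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")[3]["audio"]
    pipe = DiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",
        custom_pipeline="speech_to_image_diffusion",
        speech_model=WhisperForConditionalGeneration.from_pretrained("openai/whisper-small"),
        speech_processor=WhisperProcessor.from_pretrained("openai/whisper-small"),
    )
    pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
    image = pipe(audio_sample["array"]).images[0]
    image.save("speech_to_image.png")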
| 51 | 1 |
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
lowerCamelCase : int = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet; `one_hot` is used only if `fake_data` is true."""
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from `source_url`, unless it is already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(
    None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"
    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)
    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)
    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)
    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)
    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
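if __name__ == "__main__":
    # Usage sketch (added for illustration, not part of the original module).
    # The directory below is a placeholder; the four MNIST archives (~11 MB total)
    # are downloaded into it on first run.
    mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
    batch_images, batch_labels = mnist.train.next_batch(32)
    print(batch_images.shape, batch_labels.shape)  # -> (32, 784) (32, 10)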
| 47 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase : int = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[Any] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"MBartForCausalLM",
"MBartForConditionalGeneration",
"MBartForQuestionAnswering",
"MBartForSequenceClassification",
"MBartModel",
"MBartPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"TFMBartForConditionalGeneration",
"TFMBartModel",
"TFMBartPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
"FlaxMBartForConditionalGeneration",
"FlaxMBartForQuestionAnswering",
"FlaxMBartForSequenceClassification",
"FlaxMBartModel",
"FlaxMBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 47 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    """Configuration class for the Decision Transformer model."""

    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
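# Usage sketch (added for illustration): configuring for a Hopper-style
# environment (11-dimensional state, 3-dimensional action):
#
#     from transformers import DecisionTransformerConfig
#     config = DecisionTransformerConfig(state_dim=11, act_dim=3)
#     config.model_type  # -> "decision_transformer"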
| 371 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(F'''.{module_name}''', '''transformers.models''')
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, '''__name__''', None) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('''transformers''')
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def get_image_processor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, IMAGE_PROCESSOR_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    if resolved_config_file is None:
        logger.info(
            '''Could not locate the image processor configuration file, will try to use the model config instead.''' )
        return {}
    with open(resolved_config_file, encoding='''utf-8''') as reader:
        return json.load(reader)
class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError(
            '''AutoImageProcessor is designed to be instantiated '''
            '''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop('''config''', None)
        trust_remote_code = kwargs.pop('''trust_remote_code''', None)
        kwargs['''_from_auto'''] = True
        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get('''image_processor_type''', None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get('''auto_map''', {}):
            image_processor_auto_map = config_dict['''auto_map''']['''AutoImageProcessor''']
        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop('''feature_extractor_type''', None)
            if feature_extractor_class is not None:
                logger.warning(
                    '''Could not find image processor class in the image processor config or the model config. Loading'''
                    ''' based on pattern matching with the model\'s feature extractor configuration.''' )
                image_processor_class = feature_extractor_class.replace('''FeatureExtractor''', '''ImageProcessor''')
            if "AutoFeatureExtractor" in config_dict.get('''auto_map''', {}):
                feature_extractor_auto_map = config_dict['''auto_map''']['''AutoFeatureExtractor''']
                image_processor_auto_map = feature_extractor_auto_map.replace('''FeatureExtractor''', '''ImageProcessor''')
                logger.warning(
                    '''Could not find image processor auto map in the image processor config or the model config.'''
                    ''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )
        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, '''image_processor_type''', None)
            if hasattr(config, '''auto_map''') and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map['''AutoImageProcessor''']
        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)
        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)
        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop('''code_revision''', None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)
        raise ValueError(
            f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
            f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
            f'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
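# Usage sketch (added for illustration): resolution goes
# checkpoint -> config -> IMAGE_PROCESSOR_MAPPING_NAMES entry above, e.g.
#
#     from transformers import AutoImageProcessor
#     processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#     type(processor).__name__  # -> "ViTImageProcessor"
#
# (downloads the processor config from the Hub on first call).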
| 218 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = '''Salesforce/blip-image-captioning-base'''
    description = (
        '''This is a tool that generates a description of an image. It takes an input named `image` which should be the '''
        '''image to caption, and returns a text that contains the description in English.'''
    )
    name = '''image_captioner'''
    model_class = AutoModelForVision2Seq

    inputs = ['''image''']
    outputs = ['''text''']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''vision'''])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors='''pt''')

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
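# Usage sketch (added for illustration): invoking the tool downloads the BLIP
# checkpoint named in `default_checkpoint`; "photo.jpg" below is a placeholder path.
#
#     from PIL import Image
#     tool = ImageCaptioningTool()
#     tool(Image.open("photo.jpg"))  # e.g. "a dog sitting on the grass"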
| 0 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    # Note: the original index expressions on the left-hand side were lost in this
    # excerpt; the standard semantics are restored here (right padding fills the
    # leading positions, left padding fills the trailing positions).
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        trimmed = tensor[:sequence_length]
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(trimmed), :2] = trimmed
            else:
                out_tensor[i, : len(trimmed)] = trimmed
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, sequence_length - len(trimmed) :, :2] = trimmed
            else:
                out_tensor[i, sequence_length - len(trimmed) :] = trimmed
    return out_tensor.tolist()
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt" if labels is None else None, )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
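# Worked example (added for illustration) of the padding performed above with
# label_pad_token_id=-100, padding_side="right" and sequence_length=4:
# labels [[1, 2], [3]] become [[1, 2, -100, -100], [3, -100, -100, -100]], and
# padding_tensor([[5], [7, 8]], -1, "right", 3) returns [[5, -1, -1], [7, 8, -1]].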
| 281 | 0 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split('''_''')

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''', '''embeddings.patch_embeddings.projection''')
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''', '''embeddings.norm''')
    if "layers" in name:
        name = '''encoder.''' + name
    if "attn.proj" in name:
        name = name.replace('''attn.proj''', '''attention.output.dense''')
    if "attn" in name:
        name = name.replace('''attn''', '''attention.self''')
    if "norm1" in name:
        name = name.replace('''norm1''', '''layernorm_before''')
    if "norm2" in name:
        name = name.replace('''norm2''', '''layernorm_after''')
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''', '''intermediate.dense''')
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''', '''output.dense''')

    if name == "norm.weight":
        name = '''layernorm.weight'''
    if name == "norm.bias":
        name = '''layernorm.bias'''

    if "head" in name:
        name = name.replace('''head''', '''classifier''')
    else:
        name = '''swin.''' + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split('''.''')
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'''
                ] = val[:dim, :]
                orig_state_dict[
                    f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'''
                ] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[
                    f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'''
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'''
                ] = val[
                    :dim
                ]
                orig_state_dict[
                    f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'''
                ] = val[
                    dim : dim * 2
                ]
                orig_state_dict[
                    f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'''
                ] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''

    image_processor = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swin_name.replace('''_''', '''-''')))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors='''pt''')

    timm_outs = timm_model(inputs['''pixel_values'''])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(F'''Saving model {swin_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)

    print(F'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
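# Illustrative invocation (the output path is an example, not part of the original script):
#   python convert_swin_timm_to_pytorch.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224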
| 54 | """simple docstring"""
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents must be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
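# Minimal usage sketch (the checkpoint name assumes the public VQ-Diffusion weights on the Hub):
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#   image = pipe("teddy bear playing in the pool", truncation_rate=0.86).images[0]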
| 54 | 1 |
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main() | 74 |
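# Illustrative invocation (file paths are examples):
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized_text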
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: str):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator
class KeyHandler(type):
    '''simple docstring'''

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
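# Illustrative sketch (hypothetical class; assumes the `mark`/`register` helper names above
# and that KEYMAP defines an "up" entry):
#   @register
#   class Menu:
#       @mark(KEYMAP["up"])
#       def on_up(cls):
#           print("moved up")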
| 58 | 0 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
'''simple docstring'''
    def __init__(self, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        """simple docstring"""
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        """simple docstring"""
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        """simple docstring"""
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        """simple docstring"""
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        """simple docstring"""
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask

        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 367 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    '''simple docstring'''

    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        """simple docstring"""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        """simple docstring"""
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        """simple docstring"""
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        """simple docstring"""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        """simple docstring"""
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        """simple docstring"""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        """simple docstring"""
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        """simple docstring"""
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        """simple docstring"""
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 301 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
def _A (self ):
__lowercase= 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowercase= self.get_dummy_components()
__lowercase= StableDiffusionPanoramaPipeline(**lowerCAmelCase )
__lowercase= sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= self.get_dummy_inputs(lowerCAmelCase )
__lowercase= sd_pipe(**lowerCAmelCase ).images
__lowercase= image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__lowercase= np.array([0.61_86, 0.53_74, 0.49_15, 0.41_35, 0.41_14, 0.45_63, 0.51_28, 0.49_77, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _A (self ):
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def _A (self ):
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )
def _A (self ):
__lowercase= 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowercase= self.get_dummy_components()
__lowercase= StableDiffusionPanoramaPipeline(**lowerCAmelCase )
__lowercase= sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= self.get_dummy_inputs(lowerCAmelCase )
__lowercase= 'french fries'
__lowercase= sd_pipe(**lowerCAmelCase , negative_prompt=lowerCAmelCase )
__lowercase= output.images
__lowercase= image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__lowercase= np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _A (self ):
__lowercase= 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowercase= self.get_dummy_components()
__lowercase= StableDiffusionPanoramaPipeline(**lowerCAmelCase )
__lowercase= sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= self.get_dummy_inputs(lowerCAmelCase )
__lowercase= sd_pipe(**lowerCAmelCase , view_batch_size=2 )
__lowercase= output.images
__lowercase= image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__lowercase= np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _A (self ):
__lowercase= 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowercase= self.get_dummy_components()
__lowercase= EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' )
__lowercase= StableDiffusionPanoramaPipeline(**lowerCAmelCase )
__lowercase= sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= self.get_dummy_inputs(lowerCAmelCase )
__lowercase= sd_pipe(**lowerCAmelCase ).images
__lowercase= image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__lowercase= np.array([0.40_24, 0.65_10, 0.49_01, 0.53_78, 0.58_13, 0.56_22, 0.47_95, 0.44_67, 0.49_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _A (self ):
__lowercase= 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowercase= self.get_dummy_components()
__lowercase= PNDMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , skip_prk_steps=lowerCAmelCase )
__lowercase= StableDiffusionPanoramaPipeline(**lowerCAmelCase )
__lowercase= sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= self.get_dummy_inputs(lowerCAmelCase )
__lowercase= sd_pipe(**lowerCAmelCase ).images
__lowercase= image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__lowercase= np.array([0.63_91, 0.62_91, 0.48_61, 0.51_34, 0.55_52, 0.45_78, 0.50_32, 0.50_23, 0.45_39] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
def _A (self ):
__lowercase= 'stabilityai/stable-diffusion-2-base'
__lowercase= DDIMScheduler.from_pretrained(lowerCAmelCase , subfolder='scheduler' )
__lowercase= StableDiffusionPanoramaPipeline.from_pretrained(lowerCAmelCase , scheduler=lowerCAmelCase , safety_checker=lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
__lowercase= self.get_inputs()
__lowercase= pipe(**lowerCAmelCase ).images
__lowercase= image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 2_0_4_8, 3)
__lowercase= np.array(
[
0.36_96_83_92,
0.27_02_53_72,
0.32_44_67_66,
0.28_37_93_87,
0.36_36_32_74,
0.30_73_33_47,
0.27_10_00_27,
0.27_05_41_25,
0.25_53_60_96,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def _A (self ):
__lowercase= StableDiffusionPanoramaPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-base' , safety_checker=lowerCAmelCase )
__lowercase= LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
__lowercase= self.get_inputs()
__lowercase= pipe(**lowerCAmelCase ).images
__lowercase= image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 2_0_4_8, 3)
__lowercase= np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def _A (self ):
__lowercase= 0
def callback_fn(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> None:
__lowercase= True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__lowercase= latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 2_5_6)
__lowercase= latents[0, -3:, -3:, -1]
__lowercase= np.array(
[
0.18_68_18_69,
0.33_90_78_16,
0.5_36_12_76,
0.14_43_28_65,
-0.02_85_66_11,
-0.73_94_11_23,
0.23_39_79_87,
0.47_32_26_82,
-0.37_82_31_64,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
__lowercase= latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 2_5_6)
__lowercase= latents[0, -3:, -3:, -1]
__lowercase= np.array(
[
0.18_53_96_45,
0.33_98_72_48,
0.5_37_85_59,
0.14_43_71_42,
-0.02_45_52_61,
-0.7_33_83_17,
0.23_99_07_55,
0.47_35_62_72,
-0.3_78_65_05,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
__lowercase= False
__lowercase= 'stabilityai/stable-diffusion-2-base'
__lowercase= DDIMScheduler.from_pretrained(lowerCAmelCase , subfolder='scheduler' )
__lowercase= StableDiffusionPanoramaPipeline.from_pretrained(lowerCAmelCase , scheduler=lowerCAmelCase , safety_checker=lowerCAmelCase )
__lowercase= pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
__lowercase= self.get_inputs()
pipe(**lowerCAmelCase , callback=lowerCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _A (self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowercase= 'stabilityai/stable-diffusion-2-base'
__lowercase= DDIMScheduler.from_pretrained(lowerCAmelCase , subfolder='scheduler' )
__lowercase= StableDiffusionPanoramaPipeline.from_pretrained(lowerCAmelCase , scheduler=lowerCAmelCase , safety_checker=lowerCAmelCase )
__lowercase= pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__lowercase= self.get_inputs()
__lowercase= pipe(**lowerCAmelCase )
__lowercase= torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 1_0**9
| 295 |
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factors of ``n`` in ascending order."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
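# For example, prime_factors(360) == [2, 2, 2, 3, 3, 5], since 360 = 2**3 * 3**2 * 5.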
if __name__ == "__main__":
import doctest
doctest.testmod()
| 295 | 1 |
"""simple docstring"""
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    """simple docstring"""
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)
        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 234 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    """https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"""
    """position_salaries.csv"""
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


def viz_polymonial():
    """simple docstring"""
    plt.scatter(X, y, color="""red""")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="""blue""")
    plt.title("""Truth or Bluff (Linear Regression)""")
    plt.xlabel("""Position level""")
    plt.ylabel("""Salary""")
    plt.show()


if __name__ == "__main__":
    viz_polymonial()

    # Predicting a new result with Polymonial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
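# Note: with degree=4, PolynomialFeatures expands each level x to [1, x, x**2, x**3, x**4]
# before the linear fit, which is what lets LinearRegression capture the curve.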
| 234 | 1 |
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """simple docstring"""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
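# e.g. embedding_dim=32 yields 16 sine and 16 cosine channels per timestep,
# with frequencies spaced geometrically between min_timescale and max_timescale.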
class FlaxTimestepEmbedding(nn.Module):
    '''simple docstring'''

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        """simple docstring"""
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb
class FlaxTimesteps(nn.Module):
    '''simple docstring'''

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        """simple docstring"""
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
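# Flax usage sketch (modules are applied via init/apply; requires `import jax`):
#   t = jnp.arange(4)
#   proj = FlaxTimesteps(dim=32)
#   emb = proj.apply(proj.init(jax.random.PRNGKey(0), t), t)  # shape (4, 32)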
| 305 | '''simple docstring'''
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data):
        self.data = data
        self.right = None
        self.left = None
def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise
def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width=50, char="*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
    node: TreeNode = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 50 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
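    # For the tree 1 -> (left 2, right 3): pre-order prints 1,2,3,
    # in-order prints 2,1,3 and post-order prints 2,3,1.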
| 1 | 0 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        """simple docstring"""
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        """simple docstring"""
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]) -> torch.FloatTensor:
        """simple docstring"""
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None, num_train_timesteps: Optional[int] = None):
"""simple docstring"""
__snake_case = num_inference_steps
__snake_case = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__snake_case = np.linspace(0 , num_train_timesteps - 1 , _a , dtype=_a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__snake_case = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__snake_case = (np.arange(0 , _a ) * step_ratio).round()[::-1].copy().astype(_a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__snake_case = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__snake_case = (np.arange(_a , 0 , -step_ratio )).round().copy().astype(_a )
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.""" )
__snake_case = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__snake_case = torch.from_numpy(np.log(_a ) ).to(_a )
__snake_case = np.interp(_a , np.arange(0 , len(_a ) ) , _a )
__snake_case = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__snake_case = torch.from_numpy(_a ).to(device=_a )
# interpolate sigmas
__snake_case = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__snake_case = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__snake_case = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(_a ).startswith('''mps''' ):
# mps does not support float64
__snake_case = torch.from_numpy(_a ).to(_a , dtype=torch.floataa )
else:
__snake_case = torch.from_numpy(_a ).to(_a )
# interpolate timesteps
__snake_case = self.sigma_to_t(_a ).to(_a , dtype=timesteps.dtype )
__snake_case = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__snake_case = torch.cat([timesteps[:1], interleaved_timesteps] )
__snake_case = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__snake_case = defaultdict(_a )
    def sigma_to_t(self, sigma):
"""simple docstring"""
__snake_case = sigma.log()
# get distribution
__snake_case = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__snake_case = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__snake_case = low_idx + 1
__snake_case = self.log_sigmas[low_idx]
__snake_case = self.log_sigmas[high_idx]
# interpolate sigmas
__snake_case = (low - log_sigma) / (low - high)
__snake_case = w.clamp(0 , 1 )
# transform interpolation to time range
__snake_case = (1 - w) * low_idx + w * high_idx
__snake_case = t.view(sigma.shape )
return t
    @property
    def state_in_first_order(self):
        """simple docstring"""
        return self.sample is None
    def step(self, model_output: Union[torch.FloatTensor, np.ndarray], timestep: Union[float, torch.FloatTensor], sample: Union[torch.FloatTensor, np.ndarray], return_dict: bool = True):
"""simple docstring"""
__snake_case = self.index_for_timestep(_a )
# advance index counter by 1
__snake_case = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__snake_case = self.sigmas[step_index]
__snake_case = self.sigmas_interpol[step_index + 1]
__snake_case = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__snake_case = self.sigmas[step_index - 1]
__snake_case = self.sigmas_interpol[step_index]
__snake_case = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__snake_case = 0
__snake_case = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__snake_case = sigma_hat if self.state_in_first_order else sigma_interpol
__snake_case = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__snake_case = sigma_hat if self.state_in_first_order else sigma_interpol
__snake_case = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('''prediction_type not implemented yet: sample''' )
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__snake_case = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__snake_case = sigma_interpol - sigma_hat
# store for 2nd order step
__snake_case = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__snake_case = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__snake_case = sigma_next - sigma_hat
__snake_case = self.sample
__snake_case = None
__snake_case = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_a )
    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.FloatTensor) -> torch.FloatTensor:
"""simple docstring"""
__snake_case = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_a ):
# mps does not support float64
__snake_case = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__snake_case = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__snake_case = self.timesteps.to(original_samples.device )
__snake_case = timesteps.to(original_samples.device )
__snake_case = [self.index_for_timestep(_a , _a ) for t in timesteps]
__snake_case = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__snake_case = sigma.unsqueeze(-1 )
__snake_case = original_samples + noise * sigma
return noisy_samples
def __len__(self : int ):
"""simple docstring"""
return self.config.num_train_timesteps
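# --- Illustrative sketch (not part of the scheduler above) ---
# A self-contained version of the `_sigma_to_t` log-sigma interpolation, with a toy
# increasing log-sigma schedule. Only the math mirrors the method; the schedule values
# and function name are made up for illustration.
import torch

def sigma_to_t_sketch(sigma, log_sigmas):
    log_sigma = sigma.log()
    dists = log_sigma - log_sigmas[:, None]
    # index of the last schedule entry at or below log_sigma brackets the value
    low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=log_sigmas.shape[0] - 2)
    high_idx = low_idx + 1
    low, high = log_sigmas[low_idx], log_sigmas[high_idx]
    w = ((low - log_sigma) / (low - high)).clamp(0, 1)
    return ((1 - w) * low_idx + w * high_idx).view(sigma.shape)

print(sigma_to_t_sketch(torch.tensor([1.5]), torch.linspace(-2.0, 2.0, steps=10)))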
| 361 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_vivit'] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vivit'] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
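# Hedged usage sketch for the lazy module above: attribute access is what actually
# triggers the submodule import, so the heavy torch/vision dependencies stay unloaded
# until needed. Assumes transformers is installed with the torch and vision extras.
from transformers import VivitConfig, VivitForVideoClassification

config = VivitConfig()  # resolves configuration_vivit on first access
model = VivitForVideoClassification(config)  # resolves modeling_vivit (needs torch)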
| 238 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_vision_text_dual_encoder': ['VisionTextDualEncoderConfig'],
'processing_vision_text_dual_encoder': ['VisionTextDualEncoderProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vision_text_dual_encoder'] = ['VisionTextDualEncoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vision_text_dual_encoder'] = ['FlaxVisionTextDualEncoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vision_text_dual_encoder'] = ['TFVisionTextDualEncoderModel']
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 167 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)
BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/config.json',
# See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    model_type = """bart"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self , vocab_size=5_02_65 , max_position_embeddings=10_24 , encoder_layers=12 , encoder_ffn_dim=40_96 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=40_96 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , activation_function="gelu" , d_model=10_24 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , classifier_dropout=0.0 , scale_embedding=False , use_cache=True , num_labels=3 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , is_encoder_decoder=True , decoder_start_token_id=2 , forced_eos_token_id=2 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , False ):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
                '''The config can simply be saved and uploaded again to be fixed.''' )
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
    def inputs(self ):
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
            else:
                common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''decoder_sequence'''}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_inputs[F"""past_key_values.{i}.key"""] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_inputs[F"""past_key_values.{i}.value"""] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        else:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ] )
        return common_inputs
@property
    def outputs(self ):
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast , self ).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_outputs[F"""present.{i}.key"""] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_outputs[F"""present.{i}.value"""] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm( self , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ):
        """simple docstring"""
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , seq_length , is_pair , framework )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , decoder_seq_length , is_pair , framework )
        decoder_inputs = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs , **decoder_inputs )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            batch, encoder_seq_length = common_inputs['''input_ids'''].shape
            decoder_seq_length = common_inputs['''decoder_input_ids'''].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs['''decoder_attention_mask'''] = torch.cat(
                [common_inputs['''decoder_attention_mask'''], torch.ones(batch , decoder_past_length )] , dim=1 )
            common_inputs['''past_key_values'''] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers , num_decoder_layers )
            max_num_layers = max(num_encoder_layers , num_decoder_layers ) - min_num_layers
            remaining_side_name = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
            for _ in range(min_num_layers ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape ),
                        torch.zeros(decoder_shape ),
                        torch.zeros(encoder_shape ),
                        torch.zeros(encoder_shape ),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
            for _ in range(min_num_layers , max_num_layers ):
                common_inputs["past_key_values"].append((torch.zeros(shape ), torch.zeros(shape )) )
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm( self , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ):
        """simple docstring"""
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , seq_length , is_pair , framework )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            batch, seqlen = common_inputs['''input_ids'''].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs['''attention_mask'''].dtype
            common_inputs['''attention_mask'''] = torch.cat(
                [common_inputs['''attention_mask'''], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
            common_inputs['''past_key_values'''] = [
                (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(num_encoder_layers )
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering( self , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ):
        """simple docstring"""
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input , return_tensors=framework ) )
        return common_inputs
    def generate_dummy_inputs( self , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ):
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        return common_inputs
    def _flatten_past_key_values_( self , flattened_output , name , idx , t ):
        """simple docstring"""
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output , name , idx , t )
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast , self )._flatten_past_key_values_(
                flattened_output , name , idx , t )
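# Hedged usage sketch for the ONNX config above; it mirrors the public transformers API,
# with an example checkpoint name. `framework=None` keeps the dummy inputs as plain lists.
from transformers import AutoTokenizer, BartConfig
from transformers.models.bart.configuration_bart import BartOnnxConfig

onnx_config = BartOnnxConfig(BartConfig(), task="default")
tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large")
dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework=None)
print(sorted(dummy_inputs))  # ['attention_mask', 'decoder_attention_mask', 'decoder_input_ids', 'input_ids']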
| 167 | 1 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig ):
    '''simple docstring'''
    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder ):
    '''simple docstring'''
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self ):
        '''simple docstring'''
        if self.config.block_size is not None:
            logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead')
            self.config.chunksize = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.')
if self.config.newlines_in_values is not None:
raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported')
return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self , dl_manager ):
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files , (str, list, tuple)):
            files = data_files
            if isinstance(files , str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'files': files}))
        return splits
    def _cast_table(self , pa_table ):
        '''simple docstring'''
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name , pa.array([None] * len(pa_table) , type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self , files ):
        '''simple docstring'''
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset , (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file , 'rb') as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32 , 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding , errors=encoding_errors).encode('utf-8')
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch) , read_options=paj.ReadOptions(block_size=block_size))
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e , pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            F"""Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""")
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(F"""Failed to read file '{file}' with error {type(e)}: {e}""")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset , list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(F"""Failed to read file '{file}' with error {type(e)}: {e}""")
                                    raise ValueError(F"""Not able to read records in the JSON file at {file}.""") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(F"""Failed to read file '{file}' with error {type(e)}: {e}""")
                                raise ValueError(
                                    F"""Not able to read records in the JSON file at {file}. """
                                    F"""You should probably indicate the field of the JSON file containing your records. """
                                    F"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """
                                    F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
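# A standalone sketch of the chunked JSON-Lines loop above, outside the builder
# machinery. Path, function name, and chunk size are illustrative; the
# straddling-retry mirrors the logic in `_generate_tables`.
import io
import pyarrow as pa
import pyarrow.json as paj

def iter_jsonl_tables(path, chunksize=10 << 20):
    block_size = max(chunksize // 32, 16 << 10)
    with open(path, "rb") as f:
        while True:
            batch = f.read(chunksize)
            if not batch:
                break
            batch += f.readline()  # finish the current line
            while True:
                try:
                    yield paj.read_json(io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size))
                    break
                except (pa.ArrowInvalid, pa.ArrowNotImplementedError):
                    if block_size > len(batch):
                        raise
                    block_size *= 2  # a row straddled the block boundary; retry bigger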
| 355 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader ):
    '''simple docstring'''
    def __init__( self , df : pyspark.sql.DataFrame , split : Optional[NamedSplit] = None , features : Optional[Features] = None , streaming : bool = True , cache_dir : str = None , keep_in_memory : bool = False , working_dir : str = None , load_from_cache_file : bool = True , file_format : str = "arrow" , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , **kwargs , )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df , features=features , cache_dir=cache_dir , working_dir=working_dir , **kwargs , )
    def read(self ):
        '''simple docstring'''
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode , file_format=self._file_format , )
        return self.builder.as_dataset(split=self.split)
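# Hedged usage sketch: `Dataset.from_spark` is the public entry point that wraps the
# reader above. Requires pyspark with a local session; the toy DataFrame is illustrative.
from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([("a", 0), ("b", 1)], ["text", "label"])
ds = Dataset.from_spark(df)
print(ds[0])  # {'text': 'a', 'label': 0}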
| 119 | 0 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus ):
"""simple docstring"""
@require_torch
    def test_offline_mode(self):
'''simple docstring'''
a__ : int = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
a__ : Any = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
a__ : Dict = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
a__ : List[str] = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(lowerCAmelCase__)
BertModel.from_pretrained(lowerCAmelCase__)
BertTokenizer.from_pretrained(lowerCAmelCase__)
pipeline(task='fill-mask' , model=lowerCAmelCase__)
# baseline - just load from_pretrained with normal network
a__ : Union[str, Any] = [sys.executable, '-c', '\n'.join([load, run, mock])]
# should succeed
a__ : Optional[int] = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
a__ : List[str] = '1'
a__ : List[str] = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn('success' , result.stdout.decode())
@require_torch
    def test_offline_mode_no_internet(self):
'''simple docstring'''
a__ : Optional[Any] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
a__ : Union[str, Any] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
a__ : Dict = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
a__ : List[str] = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(lowerCAmelCase__)
BertModel.from_pretrained(lowerCAmelCase__)
BertTokenizer.from_pretrained(lowerCAmelCase__)
pipeline(task='fill-mask' , model=lowerCAmelCase__)
# baseline - just load from_pretrained with normal network
a__ : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock])]
# should succeed
a__ : Union[str, Any] = self.get_env()
a__ : List[str] = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn('success' , result.stdout.decode())
@require_torch
    def test_offline_mode_sharded_checkpoint(self):
'''simple docstring'''
a__ : Optional[Any] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
a__ : Union[str, Any] = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
a__ : List[str] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
a__ : str = [sys.executable, '-c', '\n'.join([load, run])]
# should succeed
a__ : Optional[int] = self.get_env()
a__ : Any = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn('success' , result.stdout.decode())
# next emulate no network
a__ : List[Any] = [sys.executable, '-c', '\n'.join([load, mock, run])]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
a__ : Tuple = '1'
a__ : Optional[Any] = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn('success' , result.stdout.decode())
@require_torch
    def test_offline_mode_pipeline_exception(self):
'''simple docstring'''
a__ : str = '\nfrom transformers import pipeline\n '
a__ : Any = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
a__ : Any = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
a__ : Dict = self.get_env()
a__ : Union[str, Any] = '1'
a__ : Any = [sys.executable, '-c', '\n'.join([load, mock, run])]
a__ : Union[str, Any] = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__)
self.assertEqual(result.returncode , 1 , result.stderr)
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '') , )
@require_torch
    def test_offline_model_dynamic_model(self):
'''simple docstring'''
a__ : List[Any] = '\nfrom transformers import AutoModel\n '
a__ : int = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
a__ : Optional[Any] = [sys.executable, '-c', '\n'.join([load, run])]
# should succeed
a__ : Optional[Any] = self.get_env()
a__ : int = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn('success' , result.stdout.decode())
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
a__ : Any = '1'
a__ : List[str] = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn('success' , result.stdout.decode())
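# The tests above cut network access by monkey-patching `socket.socket` inside a child
# interpreter before transformers runs. A minimal inline sketch of the same trick:
import socket

def offline_socket(*args, **kwargs):
    raise RuntimeError("Offline mode is enabled, we shouldn't access internet")

socket.socket = offline_socket  # any later network call now fails fast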
| 99 |
"""simple docstring"""
def valid_connection(graph, next_ver, curr_ind, path) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path )
def util_hamilton_cycle(graph, path, curr_ind) -> bool:
    # Base Case
    if curr_ind == len(graph ):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0 , len(graph ) ):
        if valid_connection(graph , next_ver , curr_ind , path ):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph , path , curr_ind + 1 ):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False
def hamilton_cycle(graph, start_index = 0) -> list[int]:
    path = [-1] * (len(graph ) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph , path , 1 ) else []
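# Quick usage example for the functions above: a 5-vertex graph with a Hamiltonian
# cycle starting and ending at vertex 0.
if __name__ == "__main__":
    graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(graph))  # [0, 1, 2, 4, 3, 0]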
| 69 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb'] = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb_fast'] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 106 |
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __A ( unittest.TestCase , UpperCamelCase__ ):
def _lowercase (self : Tuple ):
UpperCAmelCase_ = load_tool("text-to-speech" )
self.tool.setup()
def _lowercase (self : Union[str, Any] ):
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
UpperCAmelCase_ = self.tool("hey" )
UpperCAmelCase_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) )
def _lowercase (self : List[str] ):
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
UpperCAmelCase_ = self.tool("hey" )
UpperCAmelCase_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) )
| 106 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        '''simple docstring'''
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 85 |
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_ ):
    '''simple docstring'''
    return EnvironmentCommand()
class EnvironmentCommand(BaseDiffusersCLICommand ):
    @staticmethod
    def register_subcommand(parser ):
        '''simple docstring'''
        download_parser = parser.add_parser("env" )
        download_parser.set_defaults(func=info_command_factory )
    def run(self ):
        '''simple docstring'''
        hub_version = huggingface_hub.__version__
        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = "not installed"
        if is_transformers_available():
            import transformers
            transformers_version = transformers.__version__
        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate
            accelerate_version = accelerate.__version__
        xformers_version = "not installed"
        if is_xformers_available():
            import xformers
            xformers_version = xformers.__version__
        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": F'{pt_version} ({pt_cuda_available})',
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }
        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
        print(self.format_dict(info ) )
        return info
    @staticmethod
    def format_dict(d ) -> str:
        '''simple docstring'''
        return "\n".join([F'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
| 85 | 1 |
import math
class Graph:
    def __init__(self, n=0 ):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n )] for i in range(0, n )
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n )] for i in range(0, n )
        ]  # dp[i][j] stores minimum distance from i to j
    def add_edge(self, u, v, w ):
        self.dp[u][v] = w
    def floyd_warshall(self ):
        for k in range(0, self.n ):
            for i in range(0, self.n ):
                for j in range(0, self.n ):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j] )
    def show_min(self, u, v ):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 350 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class SpeechT5FeatureExtractor(SequenceFeatureExtractor ):
    model_input_names = ["input_values", "attention_mask"]
    def __init__( self, feature_size = 1, sampling_rate = 1_6000, padding_value = 0.0, do_normalize = False, num_mel_bins = 80, hop_length = 16, win_length = 64, win_function = "hann_window", frame_signal_scale = 1.0, fmin = 80, fmax = 7600, mel_floor = 1e-10, reduction_factor = 2, return_attention_mask = True, **kwargs, ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs )
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size )
        self.n_freqs = (self.n_fft // 2) + 1
        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True )
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.num_mel_bins, min_frequency=self.fmin, max_frequency=self.fmax, sampling_rate=self.sampling_rate, norm='slaney', mel_scale='slaney', )
        if frame_signal_scale != 1.0:
            warnings.warn(
                'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers', FutureWarning, )
        if reduction_factor != 2.0:
            warnings.warn(
                'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers', FutureWarning, )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value = 0.0 ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32 )
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1 ) ):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice )
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
        return normed_input_values
    def _extract_mel_features(self, one_waveform, ) -> np.ndarray:
        log_mel_spec = spectrogram(
            one_waveform, window=self.window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, mel_filters=self.mel_filters, mel_floor=self.mel_floor, log_mel='log10', )
        return log_mel_spec.T
    def __call__( self, audio = None, audio_target = None, padding = False, max_length = None, truncation = False, pad_to_multiple_of = None, return_attention_mask = None, return_tensors = None, sampling_rate = None, **kwargs, ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError('You must provide either `audio` or `audio_target` values.' )
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    F""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
                    F""" {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        if audio is not None:
            inputs = self._process_audio(
                audio, False, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs, )
        else:
            inputs = None
        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target, True, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs, )
            if inputs is None:
                return inputs_target
            else:
                inputs['labels'] = inputs_target['input_values']
                decoder_attention_mask = inputs_target.get('attention_mask' )
                if decoder_attention_mask is not None:
                    inputs['decoder_attention_mask'] = decoder_attention_mask
        return inputs
    def _process_audio(self, speech, is_target = False, padding = False, max_length = None, truncation = False, pad_to_multiple_of = None, return_attention_mask = None, return_tensors = None, **kwargs, ) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray ) and len(speech.shape ) > 1
        if is_batched_numpy and len(speech.shape ) > 2:
            raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple) ) and (isinstance(speech[0], (np.ndarray, tuple, list) ))
        )
        if is_batched:
            speech = [np.asarray(one_waveform, dtype=np.float32 ) for one_waveform in speech]
        elif not is_batched and not isinstance(speech, np.ndarray ):
            speech = np.asarray(speech, dtype=np.float32 )
        elif isinstance(speech, np.ndarray ) and speech.dtype is np.dtype(np.float64 ):
            speech = speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            speech = [speech]
        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size
        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(one_waveform ) for one_waveform in speech]
            encoded_inputs = BatchFeature({'input_values': features} )
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({'input_values': speech} )
        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs, )
        self.feature_size = feature_size_hack
        # convert input values to correct format
        input_values = padded_inputs['input_values']
        if not isinstance(input_values[0], np.ndarray ):
            padded_inputs['input_values'] = [np.asarray(array, dtype=np.float32 ) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray )
            and isinstance(input_values[0], np.ndarray )
            and input_values[0].dtype is np.dtype(np.float64 )
        ):
            padded_inputs['input_values'] = [array.astype(np.float32 ) for array in input_values]
        elif isinstance(input_values, np.ndarray ) and input_values.dtype is np.dtype(np.float64 ):
            padded_inputs['input_values'] = input_values.astype(np.float32 )
        # convert attention_mask to correct format
        attention_mask = padded_inputs.get('attention_mask' )
        if attention_mask is not None:
            padded_inputs['attention_mask'] = [np.asarray(array, dtype=np.int32 ) for array in attention_mask]
        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs['input_values'] = self.zero_mean_unit_var_norm(
                padded_inputs['input_values'], attention_mask=attention_mask, padding_value=self.padding_value )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
    def to_dict(self ) -> Dict[str, Any]:
        output = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        names = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs']
        for name in names:
            if name in output:
                del output[name]
        return output
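# Hedged usage sketch for the feature extractor above: random audio stands in for real
# speech. `audio` yields padded waveforms, `audio_target` yields 80-bin log-mel frames.
import numpy as np
from transformers import SpeechT5FeatureExtractor

extractor = SpeechT5FeatureExtractor()
waveform = np.random.randn(16000).astype(np.float32)  # 1 s at 16 kHz
inputs = extractor(audio=waveform, sampling_rate=16000, return_tensors="np")
targets = extractor(audio_target=waveform, sampling_rate=16000, return_tensors="np")
print(inputs["input_values"].shape, targets["input_values"].shape)  # (1, 16000) (1, frames, 80)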
| 103 | 0 |
from __future__ import annotations
def slowsort(sequence, start = None, end = None ) -> None:
    '''simple docstring'''
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence , start , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
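# Quick usage example (slowsort sorts the list in place):
if __name__ == "__main__":
    data = [9, 3, 7, 1, 4]
    slowsort(data)
    print(data)  # [1, 3, 4, 7, 9]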
| 343 |
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a , b ) -> list:
    '''simple docstring'''
    if len(a ) != 2 or len(a[0] ) != 2 or len(b ) != 2 or len(b[0] ) != 2:
        raise Exception("Matrices are not 2x2" )
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a , matrix_b ):
    '''simple docstring'''
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]
def matrix_subtraction(matrix_a , matrix_b ):
    '''simple docstring'''
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]
def split_matrix(a ) -> tuple[list, list, list, list]:
    '''simple docstring'''
    if len(a ) % 2 != 0 or len(a[0] ) % 2 != 0:
        raise Exception("Odd matrices are not supported!" )
    matrix_length = len(a )
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid , matrix_length )] for i in range(mid )]
    bot_right = [
        [a[i][j] for j in range(mid , matrix_length )] for i in range(mid , matrix_length )
    ]
    top_left = [[a[i][j] for j in range(mid )] for i in range(mid )]
    bot_left = [[a[i][j] for j in range(mid )] for i in range(mid , matrix_length )]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix ) -> tuple[int, int]:
    '''simple docstring'''
    return len(matrix ), len(matrix[0] )
def print_matrix(matrix ) -> None:
    '''simple docstring'''
    print("\n".join(str(line ) for line in matrix ) )
def actual_strassen(matrix_a , matrix_b ) -> list:
    '''simple docstring'''
    if matrix_dimensions(matrix_a ) == (2, 2):
        return default_matrix_multiplication(matrix_a , matrix_b )
    a, b, c, d = split_matrix(matrix_a )
    e, f, g, h = split_matrix(matrix_b )
    t1 = actual_strassen(a , matrix_subtraction(f , h ) )
    t2 = actual_strassen(matrix_addition(a , b ) , h )
    t3 = actual_strassen(matrix_addition(c , d ) , e )
    t4 = actual_strassen(d , matrix_subtraction(g , e ) )
    t5 = actual_strassen(matrix_addition(a , d ) , matrix_addition(e , h ) )
    t6 = actual_strassen(matrix_subtraction(b , d ) , matrix_addition(g , h ) )
    t7 = actual_strassen(matrix_subtraction(a , c ) , matrix_addition(e , f ) )
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5 , t4 ) , t2 ) , t6 )
    top_right = matrix_addition(t1 , t2 )
    bot_left = matrix_addition(t3 , t4 )
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1 , t5 ) , t3 ) , t7 )
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right ) ):
        new_matrix.append(top_left[i] + top_right[i] )
    for i in range(len(bot_right ) ):
        new_matrix.append(bot_left[i] + bot_right[i] )
    return new_matrix
def strassen(matrix1 , matrix2 ) -> list:
    '''simple docstring'''
    if matrix_dimensions(matrix1 )[1] != matrix_dimensions(matrix2 )[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            F'''Matrix A: {matrix1}\n'''
            F'''Matrix B: {matrix2}'''
        )
        raise Exception(msg )
    dimension1 = matrix_dimensions(matrix1 )
    dimension2 = matrix_dimensions(matrix2 )
    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]
    maximum = max(*dimension1 , *dimension2 )
    maxim = int(math.pow(2 , math.ceil(math.log2(maximum ) ) ) )
    new_matrix1 = matrix1
    new_matrix2 = matrix2
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0 , maxim ):
        if i < dimension1[0]:
            for _ in range(dimension1[1] , maxim ):
                new_matrix1[i].append(0 )
        else:
            new_matrix1.append([0] * maxim )
        if i < dimension2[0]:
            for _ in range(dimension2[1] , maxim ):
                new_matrix2[i].append(0 )
        else:
            new_matrix2.append([0] * maxim )
    final_matrix = actual_strassen(new_matrix1 , new_matrix2 )
    # Removing the additional zeros
    for i in range(0 , maxim ):
        if i < dimension1[0]:
            for _ in range(dimension2[1] , maxim ):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
| 80 | 0 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum ):
    '''simple docstring'''
    CHARACTER = '''char'''
    BPE = '''bpe'''
    WORDPIECE = '''wp'''
SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin ):
    '''simple docstring'''
    attributes = ['''image_processor''', '''char_tokenizer''']
    image_processor_class = '''ViTImageProcessor'''
    char_tokenizer_class = '''MgpstrTokenizer'''
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained('gpt2' )
        self.wp_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        '''simple docstring'''
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.' )
        if images is not None:
            inputs = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None:
            encodings = self.char_tokenizer(text , return_tensors=return_tensors , **kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs
    def batch_decode(self , sequences ):
        '''simple docstring'''
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0 )
        char_strs, char_scores = self._decode_helper(char_preds , 'char' )
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds , 'bpe' )
        wp_strs, wp_scores = self._decode_helper(wp_preds , 'wp' )
        final_strs = []
        final_scores = []
        for i in range(batch_size ):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )
        out = {}
        out['generated_text'] = final_strs
        out['scores'] = final_scores
        out['char_preds'] = char_strs
        out['bpe_preds'] = bpe_strs
        out['wp_preds'] = wp_strs
        return out
    def _decode_helper(self , pred_logits , format ):
        '''simple docstring'''
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = '[s]'
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = '#'
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = '[SEP]'
        else:
            raise ValueError(f"""Format {format} is not supported.""" )
        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0 )
        batch_max_length = pred_logits.size(1 )
        _, preds_index = pred_logits.topk(1 , dim=-1 , largest=True , sorted=True )
        preds_index = preds_index.view(-1 , batch_max_length )[:, 1:]
        preds_str = decoder(preds_index )
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits , dim=2 ).max(dim=2 )
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size ):
            pred_eos = preds_str[index].find(eos_str )
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token ) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred )
            conf_scores.append(confidence_score )
        return dec_strs, conf_scores
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(_A )]
return decode_strs
def _A ( self , _A ):
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(_A )
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(_A )]
return decode_strs
| 118 |
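The mangled fragment above selects, per sample, the best of three decodings (character, BPE, WordPiece) by confidence score. A minimal, self-contained sketch of that selection step; all names here are illustrative, not the library's API:

def pick_best_decodes(char_strs, char_scores, bpe_strs, bpe_scores, wp_strs, wp_scores):
    # For each sample, keep the candidate string with the highest confidence.
    final_strs, final_scores = [], []
    for i in range(len(char_strs)):
        scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
        strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
        best = scores.index(max(scores))
        final_strs.append(strs[best])
        final_scores.append(scores[best])
    return final_strs, final_scores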
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = '''Wav2Vec2FeatureExtractor'''
UpperCamelCase__ : Union[str, Any] = '''AutoTokenizer'''
def __init__( self , _A , _A ):
'''simple docstring'''
super().__init__(_A , _A )
__SCREAMING_SNAKE_CASE = self.feature_extractor
__SCREAMING_SNAKE_CASE = False
@classmethod
def _A ( cls , _A , **_A ):
'''simple docstring'''
try:
return super().from_pretrained(_A , **_A )
except OSError:
warnings.warn(
f"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
' include a `tokenizer_class` attribute is deprecated and will be '
'removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'
' attribute to either your `config.json` or `tokenizer_config.json` '
'file to suppress this warning: ' , _A , )
__SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE = WavaVecaCTCTokenizer.from_pretrained(_A , **_A )
return cls(feature_extractor=_A , tokenizer=_A )
def __call__( self , *_A , **_A ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*_A , **_A )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
__SCREAMING_SNAKE_CASE = kwargs.pop('raw_speech' )
else:
__SCREAMING_SNAKE_CASE = kwargs.pop('audio' , _A )
__SCREAMING_SNAKE_CASE = kwargs.pop('sampling_rate' , _A )
__SCREAMING_SNAKE_CASE = kwargs.pop('text' , _A )
if len(_A ) > 0:
__SCREAMING_SNAKE_CASE = args[0]
__SCREAMING_SNAKE_CASE = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
__SCREAMING_SNAKE_CASE = self.feature_extractor(_A , *_A , sampling_rate=_A , **_A )
if text is not None:
__SCREAMING_SNAKE_CASE = self.tokenizer(_A , **_A )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__SCREAMING_SNAKE_CASE = encodings['input_ids']
return inputs
def _A ( self , *_A , **_A ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor.pad(*_A , **_A )
__SCREAMING_SNAKE_CASE = kwargs.pop('input_features' , _A )
__SCREAMING_SNAKE_CASE = kwargs.pop('labels' , _A )
if len(_A ) > 0:
__SCREAMING_SNAKE_CASE = args[0]
__SCREAMING_SNAKE_CASE = args[1:]
if input_features is not None:
__SCREAMING_SNAKE_CASE = self.feature_extractor.pad(_A , *_A , **_A )
if labels is not None:
__SCREAMING_SNAKE_CASE = self.tokenizer.pad(_A , **_A )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
__SCREAMING_SNAKE_CASE = labels['input_ids']
return input_features
def _A ( self , *_A , **_A ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_A , **_A )
def _A ( self , *_A , **_A ):
'''simple docstring'''
return self.tokenizer.decode(*_A , **_A )
@contextmanager
def _A ( self ):
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = self.tokenizer
yield
__SCREAMING_SNAKE_CASE = self.feature_extractor
__SCREAMING_SNAKE_CASE = False
| 118 | 1 |
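The processor above routes `audio` to its feature extractor and `text` to its tokenizer, and attaches the tokenized ids as labels when both are given. A minimal sketch of that dispatch, assuming both callables return dict-like encodings:

def process(feature_extractor, tokenizer, audio=None, text=None, **kwargs):
    if audio is None and text is None:
        raise ValueError("You need to specify either an `audio` or `text` input to process.")
    inputs = feature_extractor(audio, **kwargs) if audio is not None else None
    encodings = tokenizer(text, **kwargs) if text is not None else None
    if text is None:
        return inputs
    if audio is None:
        return encodings
    inputs["labels"] = encodings["input_ids"]  # supervised targets for training
    return inputs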
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
SCREAMING_SNAKE_CASE__:Optional[Any] = """__DUMMY_TRANSFORMERS_USER__"""
SCREAMING_SNAKE_CASE__:Tuple = """Dummy User"""
SCREAMING_SNAKE_CASE__:List[str] = """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"""
SCREAMING_SNAKE_CASE__:Optional[Any] = """https://hub-ci.huggingface.co"""
SCREAMING_SNAKE_CASE__:Dict = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}"""
SCREAMING_SNAKE_CASE__:Any = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}"""
SCREAMING_SNAKE_CASE__:Any = Path("""~/.huggingface/hub_ci_token""").expanduser()
@pytest.fixture
def _lowerCamelCase( a ):
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" , a )
@pytest.fixture
def _lowerCamelCase( a ):
monkeypatch.setattr("datasets.config.HF_ENDPOINT" , a )
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" , a )
@pytest.fixture
def _lowerCamelCase( a ):
monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" , a )
@pytest.fixture
def _lowerCamelCase( a , a ):
HfFolder.save_token(a )
yield
HfFolder.delete_token()
@pytest.fixture(scope="session" )
def _lowerCamelCase( ):
return HfApi(endpoint=a )
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = HfFolder.get_token()
HfFolder.save_token(a )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(a )
@pytest.fixture
def _lowerCamelCase( a ):
def _cleanup_repo(a ):
hf_api.delete_repo(a , token=a , repo_type="dataset" )
return _cleanup_repo
@pytest.fixture
def _lowerCamelCase( a ):
@contextmanager
def _temporary_repo(a ):
try:
yield repo_id
finally:
cleanup_repo(a )
return _temporary_repo
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a , a ):
__a = F"repo_txt_data-{int(time.time() * 1_0E3 )}"
__a = F"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(a , token=a , repo_type="dataset" , private=a )
hf_api.upload_file(
token=a , path_or_fileobj=str(a ) , path_in_repo="data/text_data.txt" , repo_id=a , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(a , token=a , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _lowerCamelCase( a , a , a ):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a , a ):
__a = F"repo_zipped_txt_data-{int(time.time() * 1_0E3 )}"
__a = F"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(a , token=a , repo_type="dataset" , private=a )
hf_api.upload_file(
token=a , path_or_fileobj=str(a ) , path_in_repo="data.zip" , repo_id=a , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(a , token=a , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _lowerCamelCase( a , a , a ):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a , a ):
__a = F"repo_zipped_img_data-{int(time.time() * 1_0E3 )}"
__a = F"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(a , token=a , repo_type="dataset" , private=a )
hf_api.upload_file(
token=a , path_or_fileobj=str(a ) , path_in_repo="data.zip" , repo_id=a , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(a , token=a , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _lowerCamelCase( a , a , a ):
return hf_private_dataset_repo_zipped_img_data_
| 261 |
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 261 | 1 |
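For reference, a bare-bones stand-in for the `deprecate` helper called above; the real diffusers utility is richer, so treat this as illustrative only:

import warnings


def deprecate(name, removal_version, message, standard_warn=True, stacklevel=2):
    # Emit a FutureWarning announcing removal in `removal_version`.
    prefix = f"{name} is deprecated and will be removed in version {removal_version}. " if standard_warn else ""
    warnings.warn(prefix + message, FutureWarning, stacklevel=stacklevel)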
"""simple docstring"""
from typing import Any
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: list , lowerCAmelCase: dict , lowerCAmelCase: dict , lowerCAmelCase: dict , )-> list:
_validation(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , )
# Creates data structures and fill initial step
_snake_case : dict = {}
_snake_case : dict = {}
for state in states_space:
_snake_case : List[str] = observations_space[0]
_snake_case : Tuple = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
_snake_case : Union[str, Any] = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(lowerCAmelCase ) ):
_snake_case : Optional[Any] = observations_space[o]
_snake_case : List[str] = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
_snake_case : List[Any] = ''
_snake_case : Optional[int] = -1
for k_state in states_space:
_snake_case : Optional[Any] = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
_snake_case : List[Any] = probability
_snake_case : str = k_state
# Update probabilities and pointers dicts
_snake_case : Optional[int] = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
_snake_case : str = arg_max
# The final observation
_snake_case : Dict = observations_space[len(lowerCAmelCase ) - 1]
# argmax for given final observation
_snake_case : List[str] = ''
_snake_case : Optional[int] = -1
for k_state in states_space:
_snake_case : Tuple = probabilities[(k_state, final_observation)]
if probability > max_probability:
_snake_case : Tuple = probability
_snake_case : Optional[Any] = k_state
_snake_case : int = arg_max
# Process pointers backwards
_snake_case : int = last_state
_snake_case : Any = []
for o in range(len(lowerCAmelCase ) - 1 , -1 , -1 ):
result.append(lowerCAmelCase )
_snake_case : Any = pointers[previous, observations_space[o]]
result.reverse()
return result
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Any , lowerCAmelCase: Any , lowerCAmelCase: Any , lowerCAmelCase: Any , )-> None:
_validate_not_empty(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , )
_validate_lists(lowerCAmelCase , lowerCAmelCase )
_validate_dicts(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Any , lowerCAmelCase: Any , lowerCAmelCase: Any , lowerCAmelCase: Any , )-> None:
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('There\'s an empty parameter' )
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Any )-> None:
_validate_list(lowerCAmelCase , 'observations_space' )
_validate_list(lowerCAmelCase , 'states_space' )
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: str )-> None:
if not isinstance(_object , lowerCAmelCase ):
_snake_case : Any = F"""{var_name} must be a list"""
raise ValueError(lowerCAmelCase )
else:
for x in _object:
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[Any] = F"""{var_name} must be a list of strings"""
raise ValueError(lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Any , lowerCAmelCase: Any , )-> None:
_validate_dict(lowerCAmelCase , 'initial_probabilities' , lowerCAmelCase )
_validate_nested_dict(lowerCAmelCase , 'transition_probabilities' )
_validate_nested_dict(lowerCAmelCase , 'emission_probabilities' )
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: str )-> None:
_validate_dict(_object , lowerCAmelCase , lowerCAmelCase )
for x in _object.values():
_validate_dict(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: str , lowerCAmelCase: type , lowerCAmelCase: bool = False )-> None:
if not isinstance(_object , lowerCAmelCase ):
_snake_case : int = F"""{var_name} must be a dict"""
raise ValueError(lowerCAmelCase )
if not all(isinstance(lowerCAmelCase , lowerCAmelCase ) for x in _object ):
_snake_case : str = F"""{var_name} all keys must be strings"""
raise ValueError(lowerCAmelCase )
if not all(isinstance(lowerCAmelCase , lowerCAmelCase ) for x in _object.values() ):
_snake_case : str = 'nested dictionary ' if nested else ''
_snake_case : Any = F"""{var_name} {nested_text}all values must be {value_type.__name__}"""
raise ValueError(lowerCAmelCase )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 368 |
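The row above is a style-mangled Viterbi decoder; its parameter names were collapsed, so it does not run as printed. A compact, runnable sketch of the same dynamic program over dict-based probability tables (all names are illustrative):

def viterbi(observations, states, start_p, trans_p, emit_p):
    # probs[(s, t)]: probability of the best path that ends in state s at time t.
    probs = {(s, 0): start_p[s] * emit_p[s][observations[0]] for s in states}
    pointers = {}
    for t in range(1, len(observations)):
        obs = observations[t]
        for s in states:
            best_prev = max(states, key=lambda k: probs[(k, t - 1)] * trans_p[k][s])
            probs[(s, t)] = probs[(best_prev, t - 1)] * trans_p[best_prev][s] * emit_p[s][obs]
            pointers[(s, t)] = best_prev
    last_t = len(observations) - 1
    best_last = max(states, key=lambda k: probs[(k, last_t)])
    path = [best_last]
    for t in range(last_t, 0, -1):
        path.append(pointers[(path[-1], t)])
    return path[::-1]  # most likely state sequence, in time order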
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """
    Sort `sequence` in place between `start` and `end` (inclusive) with the
    multiply-and-surrender "slowsort" scheme.

    >>> data = [4, 1, 3, 2]
    >>> slowsort(data)
    >>> data
    [1, 2, 3, 4]
    """
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        # Move the larger of the two sub-maxima to position `end`.
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 260 | 0 |
import torch
from torch import nn
class lowercase__( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1 , SCREAMING_SNAKE_CASE_ : Optional[int]=False ) -> Any:
super().__init__()
lowercase_ = n_token
lowercase_ = d_embed
lowercase_ = d_proj
lowercase_ = cutoffs + [n_token]
lowercase_ = [0] + self.cutoffs
lowercase_ = div_val
lowercase_ = self.cutoffs[0]
lowercase_ = len(self.cutoffs ) - 1
lowercase_ = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
lowercase_ = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
lowercase_ = nn.Parameter(torch.zeros(self.n_clusters ) )
lowercase_ = nn.ModuleList()
lowercase_ = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
else:
self.out_projs.append(SCREAMING_SNAKE_CASE_ )
self.out_layers.append(nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
else:
for i in range(len(self.cutoffs ) ):
lowercase_ , lowercase_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowercase_ = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
self.out_layers.append(nn.Linear(SCREAMING_SNAKE_CASE_ , r_idx - l_idx ) )
lowercase_ = keep_order
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Tuple:
if proj is None:
lowercase_ = nn.functional.linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowercase_ = nn.functional.linear(SCREAMING_SNAKE_CASE_ , proj.t().contiguous() )
lowercase_ = nn.functional.linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=False ) -> Optional[Any]:
if labels is not None:
# Shift so that tokens < n predict n
lowercase_ = hidden[..., :-1, :].contiguous()
lowercase_ = labels[..., 1:].contiguous()
lowercase_ = hidden.view(-1 , hidden.size(-1 ) )
lowercase_ = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
else:
lowercase_ = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
lowercase_ = self._compute_logit(SCREAMING_SNAKE_CASE_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
lowercase_ = labels != -1_0_0
lowercase_ = torch.zeros_like(SCREAMING_SNAKE_CASE_ , dtype=hidden.dtype , device=hidden.device )
lowercase_ = (
-nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowercase_ = nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=-1 )
else:
# construct weights and biases
lowercase_ , lowercase_ = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowercase_ , lowercase_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowercase_ = self.out_layers[0].weight[l_idx:r_idx]
lowercase_ = self.out_layers[0].bias[l_idx:r_idx]
else:
lowercase_ = self.out_layers[i].weight
lowercase_ = self.out_layers[i].bias
if i == 0:
lowercase_ = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowercase_ = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(SCREAMING_SNAKE_CASE_ )
biases.append(SCREAMING_SNAKE_CASE_ )
lowercase_ , lowercase_ , lowercase_ = weights[0], biases[0], self.out_projs[0]
lowercase_ = self._compute_logit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=1 )
if labels is None:
lowercase_ = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowercase_ = torch.zeros_like(SCREAMING_SNAKE_CASE_ , dtype=hidden.dtype , device=hidden.device )
lowercase_ = 0
lowercase_ = [0] + self.cutoffs
for i in range(len(SCREAMING_SNAKE_CASE_ ) - 1 ):
lowercase_ , lowercase_ = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowercase_ = (labels >= l_idx) & (labels < r_idx)
lowercase_ = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowercase_ = labels.index_select(0 , SCREAMING_SNAKE_CASE_ ) - l_idx
lowercase_ = head_logprob.index_select(0 , SCREAMING_SNAKE_CASE_ )
lowercase_ = hidden.index_select(0 , SCREAMING_SNAKE_CASE_ )
else:
lowercase_ = hidden
if i == 0:
if labels is not None:
lowercase_ = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
lowercase_ = head_logprob[:, : self.cutoffs[0]]
else:
lowercase_ , lowercase_ , lowercase_ = weights[i], biases[i], self.out_projs[i]
lowercase_ = self._compute_logit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=1 )
lowercase_ = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowercase_ = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
lowercase_ = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowercase_ = logprob_i
if labels is not None:
if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order:
out.index_copy_(0 , SCREAMING_SNAKE_CASE_ , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[int]:
if self.n_clusters == 0:
lowercase_ = self._compute_logit(SCREAMING_SNAKE_CASE_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=-1 )
else:
# construct weights and biases
lowercase_ , lowercase_ = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowercase_ , lowercase_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowercase_ = self.out_layers[0].weight[l_idx:r_idx]
lowercase_ = self.out_layers[0].bias[l_idx:r_idx]
else:
lowercase_ = self.out_layers[i].weight
lowercase_ = self.out_layers[i].bias
if i == 0:
lowercase_ = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowercase_ = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(SCREAMING_SNAKE_CASE_ )
biases.append(SCREAMING_SNAKE_CASE_ )
lowercase_ , lowercase_ , lowercase_ = weights[0], biases[0], self.out_projs[0]
lowercase_ = self._compute_logit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = hidden.new_empty((head_logit.size(0 ), self.n_token) )
lowercase_ = nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=1 )
lowercase_ = [0] + self.cutoffs
for i in range(len(SCREAMING_SNAKE_CASE_ ) - 1 ):
lowercase_ , lowercase_ = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
lowercase_ = head_logprob[:, : self.cutoffs[0]]
else:
lowercase_ , lowercase_ , lowercase_ = weights[i], biases[i], self.out_projs[i]
lowercase_ = self._compute_logit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=1 )
lowercase_ = head_logprob[:, -i] + tail_logprob_i
lowercase_ = logprob_i
return out
| 30 |
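The class above implements a projected adaptive log-softmax: frequent tokens sit in a head shortlist, and rare tokens are scored in tail clusters as log P(cluster | h) + log P(token | cluster, h). A simplified sketch of how the pieces combine into full-vocabulary log-probabilities (shapes and names are assumptions, not the class's API):

import torch
import torch.nn.functional as F


def adaptive_log_probs(head_logits, tail_logits, shortlist):
    # head_logits: (B, shortlist + n_clusters); tail_logits[i]: (B, size of cluster i)
    head_logprob = F.log_softmax(head_logits, dim=1)
    pieces = [head_logprob[:, :shortlist]]  # frequent tokens, scored directly by the head
    for i, logits_i in enumerate(tail_logits):
        cluster_col = shortlist + i  # this cluster's slot among the head logits
        pieces.append(head_logprob[:, cluster_col : cluster_col + 1] + F.log_softmax(logits_i, dim=1))
    return torch.cat(pieces, dim=1)  # (B, full vocabulary)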
"""simple docstring"""
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
# Check if the input is valid
if not len(UpperCamelCase__ ) == len(UpperCamelCase__ ) == 3:
raise ValueError("""Please enter a valid equation.""" )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError("""Both a & b of two equations can't be zero.""" )
# Extract the coefficients
_a , _a , _a : Any = equationa
_a , _a , _a : Tuple = equationa
# Calculate the determinants of the matrices
_a : int = aa * ba - aa * ba
_a : str = ca * ba - ca * ba
_a : str = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError("""Infinite solutions. (Consistent system)""" )
else:
raise ValueError("""No solution. (Inconsistent system)""" )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
_a : Dict = determinant_x / determinant
_a : str = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
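A quick check of the restored solver: x + y = 3 and x - y = 1 have the unique solution (2, 1).

print(cramers_rule_2x2([1, 1, 3], [1, -1, 1]))  # (2.0, 1.0)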
| 294 | 0 |
import random


def partition(a: list, left_index: int, right_index: int) -> int:
    """In-place partition around a[left_index]; returns the pivot's final index."""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    """Sort a[left:right] in place with a randomly chosen pivot."""
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
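Sanity check for the restored sort:

data = [5, 3, 8, 1, 2]
quick_sort_random(data, 0, len(data))
assert data == [1, 2, 3, 5, 8]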
| 366 |
def reverse_words(input_str: str) -> str:
    """
    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 122 | 0 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__lowercase = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
__lowercase = direct_transformers_import(PATH_TO_TRANSFORMERS)
__lowercase = transformers.models.auto.configuration_auto.CONFIG_MAPPING
__lowercase = {
# used to compute the property `self.chunk_length`
'''EncodecConfig''': ['''overlap'''],
# used as `self.bert_model = BertModel(config, ...)`
'''DPRConfig''': True,
# not used in modeling files, but it's an important information
'''FSMTConfig''': ['''langs'''],
# used internally in the configuration class file
'''GPTNeoConfig''': ['''attention_types'''],
# used internally in the configuration class file
'''EsmConfig''': ['''is_folding_model'''],
    # used during training (even though we don't have a training script for these models yet)
'''Mask2FormerConfig''': ['''ignore_value'''],
    # `ignore_value` used during training (even though we don't have a training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'''OneFormerConfig''': ['''ignore_value''', '''norm'''],
# used during preprocessing and collation, see `collating_graphormer.py`
'''GraphormerConfig''': ['''spatial_pos_max'''],
# used internally in the configuration class file
'''T5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
'''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
# used internally in the configuration class file
'''LongT5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
'''SwitchTransformersConfig''': ['''feed_forward_proj'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''BioGptConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''GLPNConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''SegformerConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''CvtConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''PerceiverConfig''': ['''layer_norm_eps'''],
# used internally to calculate the feature size
'''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate `mlp_dim`
'''SamVisionConfig''': ['''mlp_ratio'''],
# For (head) training, but so far not implemented
'''ClapAudioConfig''': ['''num_classes'''],
# Not used, but providing useful information to users
'''SpeechT5HifiGanConfig''': ['''sampling_rate'''],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'''CLIPSegConfig''': True,
'''DeformableDetrConfig''': True,
'''DetaConfig''': True,
'''DinatConfig''': True,
'''DonutSwinConfig''': True,
'''EfficientFormerConfig''': True,
'''FSMTConfig''': True,
'''JukeboxConfig''': True,
'''LayoutLMv2Config''': True,
'''MaskFormerSwinConfig''': True,
'''MT5Config''': True,
'''NatConfig''': True,
'''OneFormerConfig''': True,
'''PerceiverConfig''': True,
'''RagConfig''': True,
'''SpeechT5Config''': True,
'''SwinConfig''': True,
'''Swin2SRConfig''': True,
'''Swinv2Config''': True,
'''SwitchTransformersConfig''': True,
'''TableTransformerConfig''': True,
'''TapasConfig''': True,
'''TransfoXLConfig''': True,
'''UniSpeechConfig''': True,
'''UniSpeechSatConfig''': True,
'''WavLMConfig''': True,
'''WhisperConfig''': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'''JukeboxPriorConfig''': True,
# TODO: @Younes (for `is_decoder`)
'''Pix2StructTextConfig''': True,
}
)
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Dict = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f"""config.{attribute}""" in modeling_source
or f"""getattr(config, \"{attribute}\"""" in modeling_source
or f"""getattr(self.config, \"{attribute}\"""" in modeling_source
):
__UpperCamelCase :List[Any] = True
# Deal with multi-line cases
elif (
re.search(
Rf"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , snake_case__ , )
is not None
):
__UpperCamelCase :List[str] = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
__UpperCamelCase :int = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
__UpperCamelCase :Any = [
'''bos_index''',
'''eos_index''',
'''pad_index''',
'''unk_index''',
'''mask_index''',
'''image_size''',
'''use_cache''',
'''out_features''',
'''out_indices''',
]
__UpperCamelCase :Optional[Any] = ['''encoder_no_repeat_ngram_size''']
# Special cases to be allowed
__UpperCamelCase :Union[str, Any] = True
if not attribute_used:
__UpperCamelCase :int = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
__UpperCamelCase :Optional[Any] = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
__UpperCamelCase :List[str] = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
__UpperCamelCase :Union[str, Any] = True
elif attribute.endswith('''_token_id''' ):
__UpperCamelCase :int = True
# configuration class specific cases
if not case_allowed:
__UpperCamelCase :Tuple = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
__UpperCamelCase :Optional[Any] = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Optional[int] = dict(inspect.signature(config_class.__init__ ).parameters )
__UpperCamelCase :Any = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
__UpperCamelCase :Union[str, Any] = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
__UpperCamelCase :int = {}
if len(config_class.attribute_map ) > 0:
__UpperCamelCase :Any = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
__UpperCamelCase :Tuple = inspect.getsourcefile(snake_case__ )
__UpperCamelCase :Union[str, Any] = os.path.dirname(snake_case__ )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
__UpperCamelCase :List[str] = [os.path.join(snake_case__ , snake_case__ ) for fn in os.listdir(snake_case__ ) if fn.startswith('''modeling_''' )]
# Get the source code strings
__UpperCamelCase :List[Any] = []
for path in modeling_paths:
if os.path.isfile(snake_case__ ):
with open(snake_case__ ) as fp:
modeling_sources.append(fp.read() )
__UpperCamelCase :Union[str, Any] = []
for config_param, default_value in zip(snake_case__ , snake_case__ ):
# `attributes` here is all the variant names for `config_param`
__UpperCamelCase :Union[str, Any] = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
unused_attributes.append(attributes[0] )
return sorted(snake_case__ )
def lowerCamelCase ( ):
'''simple docstring'''
__UpperCamelCase :Union[str, Any] = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
__UpperCamelCase :List[str] = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda SCREAMING_SNAKE_CASE : inspect.isclass(snake_case__ )
and issubclass(snake_case__ , snake_case__ )
and inspect.getmodule(snake_case__ ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
__UpperCamelCase :Tuple = check_config_attributes_being_used(snake_case__ )
if len(snake_case__ ) > 0:
__UpperCamelCase :List[Any] = unused_attributes
if len(snake_case__ ) > 0:
__UpperCamelCase :int = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
for name, attributes in configs_with_unused_attributes.items():
error += f"""{name}: {attributes}\n"""
raise ValueError(snake_case__ )
if __name__ == "__main__":
check_config_attributes()
| 43 |
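The script above boils down to one predicate: does `config.xxx` (or a `getattr` variant) appear anywhere in a model's source files? A hedged sketch of that core check; the helper name is illustrative:

import re


def attribute_is_used(attribute, source):
    # Direct accesses: config.xxx, getattr(config, "xxx", ...), getattr(self.config, "xxx", ...)
    if f"config.{attribute}" in source:
        return True
    if f'getattr(config, "{attribute}"' in source or f'getattr(self.config, "{attribute}"' in source:
        return True
    # getattr calls split across lines
    return re.search(rf'getattr[ \t\n]*\([ \t\n]*(self\.)?config,[ \t\n]*"{attribute}"', source) is not None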
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__a = logging.get_logger(__name__)
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :Union[str, Any] = 'upernet'
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : str=5_1_2 , SCREAMING_SNAKE_CASE_ : Tuple=0.02 , SCREAMING_SNAKE_CASE_ : Optional[Any]=[1, 2, 3, 6] , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : Tuple=0.4 , SCREAMING_SNAKE_CASE_ : Optional[int]=3_8_4 , SCREAMING_SNAKE_CASE_ : str=2_5_6 , SCREAMING_SNAKE_CASE_ : Dict=1 , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : str=2_5_5 , **SCREAMING_SNAKE_CASE_ : str , ) -> int:
super().__init__(**SCREAMING_SNAKE_CASE_ )
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
lowercase_ = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase_ = backbone_config.get('''model_type''' )
lowercase_ = CONFIG_MAPPING[backbone_model_type]
lowercase_ = config_class.from_dict(SCREAMING_SNAKE_CASE_ )
lowercase_ = backbone_config
lowercase_ = hidden_size
lowercase_ = initializer_range
lowercase_ = pool_scales
lowercase_ = use_auxiliary_head
lowercase_ = auxiliary_loss_weight
lowercase_ = auxiliary_in_channels
lowercase_ = auxiliary_channels
lowercase_ = auxiliary_num_convs
lowercase_ = auxiliary_concat_input
lowercase_ = loss_ignore_index
def _lowercase ( self : List[str] ) -> List[str]:
lowercase_ = copy.deepcopy(self.__dict__ )
lowercase_ = self.backbone_config.to_dict()
lowercase_ = self.__class__.model_type
return output
| 30 | 0 |
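The config class above resolves its nested backbone in three ways: default it, rebuild it from a dict, or take it as-is. A simplified stand-in (the mapping and `from_dict` behavior are assumptions based on the snippet):

def resolve_backbone_config(backbone_config, config_mapping, default_type="resnet"):
    if backbone_config is None:
        return config_mapping[default_type]()  # fall back to the default backbone
    if isinstance(backbone_config, dict):
        config_class = config_mapping[backbone_config["model_type"]]
        return config_class.from_dict(backbone_config)  # rebuild from a serialized dict
    return backbone_config  # already a config object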
"""simple docstring"""
from __future__ import annotations
def SCREAMING_SNAKE_CASE_ ( snake_case : int | str )-> bool:
_lowerCamelCase = str(snake_case )
return n == n[::-1]
def SCREAMING_SNAKE_CASE_ ( snake_case : int = 1_000_000 )-> Dict:
_lowerCamelCase = 0
for i in range(1 , snake_case ):
if is_palindrome(snake_case ) and is_palindrome(bin(snake_case ).split('b' )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 80 |
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def SCREAMING_SNAKE_CASE_ ( )-> Any:
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
_lowerCamelCase = '__test_patch_submodule_mock__'
with patch_submodule(_test_patching , 'os.path.join' , snake_case ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def SCREAMING_SNAKE_CASE_ ( )-> Optional[int]:
assert _test_patching.open is open
_lowerCamelCase = '__test_patch_submodule_builtin_mock__'
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , 'open' , snake_case ):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def SCREAMING_SNAKE_CASE_ ( )-> Tuple:
# pandas.read_csv is not present in _test_patching
_lowerCamelCase = '__test_patch_submodule_missing_mock__'
with patch_submodule(_test_patching , 'pandas.read_csv' , snake_case ):
pass
def SCREAMING_SNAKE_CASE_ ( )-> Any:
# builtin should always be mocked even if they're not in the globals
# in case they're loaded at one point
_lowerCamelCase = '__test_patch_submodule_missing_builtin_mock__'
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , 'len' , snake_case ) is None
with patch_submodule(_test_patching , 'len' , snake_case ):
assert _test_patching.len is mock
assert _test_patching.len is len
def SCREAMING_SNAKE_CASE_ ( )-> Any:
_lowerCamelCase = '__test_patch_submodule_start_and_stop_mock__'
_lowerCamelCase = patch_submodule(_test_patching , 'open' , snake_case )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def SCREAMING_SNAKE_CASE_ ( )-> Tuple:
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
_lowerCamelCase = '__test_patch_submodule_successive_join__'
_lowerCamelCase = '__test_patch_submodule_successive_dirname__'
_lowerCamelCase = '__test_patch_submodule_successive_rename__'
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , 'os.path.join' , snake_case ):
with patch_submodule(_test_patching , 'os.rename' , snake_case ):
with patch_submodule(_test_patching , 'os.path.dirname' , snake_case ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , 'os.rename' , snake_case ):
with patch_submodule(_test_patching , 'os.path.join' , snake_case ):
with patch_submodule(_test_patching , 'os.path.dirname' , snake_case ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def SCREAMING_SNAKE_CASE_ ( )-> Optional[int]:
_lowerCamelCase = '__test_patch_submodule_doesnt_exist_mock__'
with patch_submodule(_test_patching , '__module_that_doesn_exist__.__attribute_that_doesn_exist__' , snake_case ):
pass
with patch_submodule(_test_patching , 'os.__attribute_that_doesn_exist__' , snake_case ):
pass
| 80 | 1 |
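The tests above exercise `patch_submodule` from `datasets.utils.patching`. For orientation, a generic, minimal attribute patcher in the same spirit (this is not the datasets implementation, which also handles dotted submodule paths):

from contextlib import contextmanager


@contextmanager
def patch_attribute(obj, name, replacement):
    original = getattr(obj, name)
    setattr(obj, name, replacement)
    try:
        yield
    finally:
        setattr(obj, name, original)  # restore even if the body raises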