code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
snake_case_ : Tuple = logging.get_logger(__name__)
snake_case_ : List[Any] = '▁'
snake_case_ : Any = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
snake_case_ : str = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
snake_case_ : Dict = {'vinai/bartpho-syllable': 1_024}
class lowercase__ ( snake_case_ ):
'''simple docstring'''
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<mask>" , lowerCamelCase__ = None , **lowerCamelCase__ , ):
'''simple docstring'''
UpperCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase__ , )
UpperCamelCase = vocab_file
UpperCamelCase = monolingual_vocab_file
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase__ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
UpperCamelCase = {}
UpperCamelCase = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(lowerCamelCase__ ) not in self.fairseq_tokens_to_ids:
UpperCamelCase = cnt
cnt += 1
with open(lowerCamelCase__ , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
UpperCamelCase = line.strip().split()[0]
UpperCamelCase = len(self.fairseq_tokens_to_ids )
if str(lowerCamelCase__ ) not in self.fairseq_tokens_to_ids:
UpperCamelCase = len(self.fairseq_tokens_to_ids )
UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
'''simple docstring'''
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
UpperCamelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowerCamelCase__ ):
'''simple docstring'''
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase__ )) + [1]
return [1] + ([0] * len(lowerCamelCase__ )) + [1, 1] + ([0] * len(lowerCamelCase__ )) + [1]
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
'''simple docstring'''
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
return len(self.fairseq_ids_to_tokens )
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase ( self , lowerCamelCase__ ):
'''simple docstring'''
return self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ )
def UpperCAmelCase ( self , lowerCamelCase__ ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def UpperCAmelCase ( self , lowerCamelCase__ ):
'''simple docstring'''
return self.fairseq_ids_to_tokens[index]
def UpperCAmelCase ( self , lowerCamelCase__ ):
'''simple docstring'''
UpperCamelCase = ''''''.join(lowerCamelCase__ ).replace(lowerCamelCase__ , ''' ''' ).strip()
return out_string
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
'''simple docstring'''
if not os.path.isdir(lowerCamelCase__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCamelCase = os.path.join(
lowerCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCamelCase = os.path.join(
lowerCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase__ , '''wb''' ) as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase__ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
lowerCamelCase__ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , lowerCamelCase__ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(lowerCamelCase__ , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'{str(lowerCamelCase__ )} \n' )
return out_vocab_file, out_monolingual_vocab_file
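# A minimal usage sketch for the tokenizer class above (upstream this is
# transformers' BartphoTokenizer; loading through AutoTokenizer and the
# "vinai/bartpho-syllable" checkpoint from the pretrained map above is an
# assumption about the intended entry point, and network access is required):
from transformers import AutoTokenizer

bartpho_tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
encoded = bartpho_tokenizer("Chúng tôi là những nghiên cứu viên.")
print(encoded["input_ids"])  # ids wrapped as <s> ... </s> by build_inputs_with_special_tokens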
| 212 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
snake_case_ : Optional[Any] = 'pytorch_model.bin'
snake_case_ : Union[str, Any] = 'pytorch_model.bin.index.json'
snake_case_ : Optional[Any] = 'adapter_config.json'
snake_case_ : List[str] = 'adapter_model.bin'
snake_case_ : Any = 'adapter_model.safetensors'
snake_case_ : Optional[int] = 'tf_model.h5'
snake_case_ : List[Any] = 'tf_model.h5.index.json'
snake_case_ : Any = 'model.ckpt'
snake_case_ : Optional[Any] = 'flax_model.msgpack'
snake_case_ : List[str] = 'flax_model.msgpack.index.json'
snake_case_ : List[str] = 'model.safetensors'
snake_case_ : Any = 'model.safetensors.index.json'
snake_case_ : Any = 'config.json'
snake_case_ : Optional[Any] = 'preprocessor_config.json'
snake_case_ : List[Any] = FEATURE_EXTRACTOR_NAME
snake_case_ : Optional[int] = 'generation_config.json'
snake_case_ : Any = 'modelcard.json'
snake_case_ : Optional[int] = '▁'
snake_case_ : Optional[Any] = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
snake_case_ : Union[str, Any] = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
snake_case_ : Union[str, Any] = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
snake_case_ : Union[str, Any] = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def __snake_case ( _UpperCAmelCase : int):
if version.parse(__version__) < version.parse(_UpperCAmelCase):
if "dev" in _UpperCAmelCase:
error_message = (
'''This example requires a source install from HuggingFace Transformers (see '''
'''`https://huggingface.co/docs/transformers/installation#install-from-source`),'''
)
else:
error_message = f'This example requires a minimum version of {_UpperCAmelCase},'
error_message += f' but the version found is {__version__}.\n'
raise ImportError(
error_message
+ '''Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '''
'''versions of HuggingFace Transformers.''')
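# A hedged sketch of the intended call pattern for the guard above (upstream
# this helper is transformers.utils.check_min_version; the version pin below
# is illustrative, not taken from this file):
MIN_TRANSFORMERS_VERSION = '4.31.0'
__snake_case(MIN_TRANSFORMERS_VERSION)  # raises ImportError if the installed transformers is older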
| 212 | 1 |
def is_palindrome(num) -> bool:
    return str(num) == str(num)[::-1]
def sum_reverse(num) -> int:
    return int(num) + int(str(num)[::-1])
def solution(limit=10000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 205 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
snake_case_ : str =logging.get_logger(__name__)
snake_case_ : Any ={'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
snake_case_ : Optional[Any] ={
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
snake_case_ : Optional[int] ={
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
snake_case_ : List[str] ={
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ : int = VOCAB_FILES_NAMES
UpperCAmelCase_ : Any = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : Any = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : List[Any] = RealmTokenizer
def __init__( self , lowercase__=None , lowercase__=None , lowercase__=True , lowercase__="[UNK]" , lowercase__="[SEP]" , lowercase__="[PAD]" , lowercase__="[CLS]" , lowercase__="[MASK]" , lowercase__=True , lowercase__=None , **lowercase__ , ) -> str:
super().__init__(
lowercase__ , tokenizer_file=lowercase__ , do_lower_case=lowercase__ , unk_token=lowercase__ , sep_token=lowercase__ , pad_token=lowercase__ , cls_token=lowercase__ , mask_token=lowercase__ , tokenize_chinese_chars=lowercase__ , strip_accents=lowercase__ , **lowercase__ , )
__A = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowercase__ ) != do_lower_case
or normalizer_state.get("strip_accents" , lowercase__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowercase__ ) != tokenize_chinese_chars
):
__A = getattr(lowercase__ , normalizer_state.pop("type" ) )
__A = do_lower_case
__A = strip_accents
__A = tokenize_chinese_chars
__A = normalizer_class(**lowercase__ )
__A = do_lower_case
def _lowerCamelCase ( self , lowercase__ , **lowercase__ ) -> Union[str, Any]:
__A = PaddingStrategy.MAX_LENGTH
__A = text
__A = kwargs.pop("text_pair" , lowercase__ )
__A = kwargs.pop("return_tensors" , lowercase__ )
__A = {
"input_ids": [],
"attention_mask": [],
"token_type_ids": [],
}
for idx, candidate_text in enumerate(lowercase__ ):
if batch_text_pair is not None:
__A = batch_text_pair[idx]
else:
__A = None
__A = super().__call__(lowercase__ , lowercase__ , return_tensors=lowercase__ , **lowercase__ )
__A = encoded_candidates.get("input_ids" )
__A = encoded_candidates.get("attention_mask" )
__A = encoded_candidates.get("token_type_ids" )
if encoded_input_ids is not None:
output_data["input_ids"].append(lowercase__ )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(lowercase__ )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(lowercase__ )
__A = {key: item for key, item in output_data.items() if len(lowercase__ ) != 0}
return BatchEncoding(lowercase__ , tensor_type=lowercase__ )
def _lowerCamelCase ( self , lowercase__ , lowercase__=None ) -> Tuple:
__A = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowerCamelCase ( self , lowercase__ , lowercase__ = None ) -> List[int]:
__A = [self.sep_token_id]
__A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCamelCase ( self , lowercase__ , lowercase__ = None ) -> Tuple[str]:
__A = self._tokenizer.model.save(lowercase__ , name=lowercase__ )
return tuple(lowercase__ )
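# A minimal usage sketch for the candidate-batching method above, which pads
# every candidate set to max_length (upstream this is RealmTokenizerFast's
# batch_encode_candidates; checkpoint name and shapes are illustrative):
from transformers import RealmTokenizerFast

realm_tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
candidates = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
batch = realm_tokenizer.batch_encode_candidates(candidates, max_length=10, return_tensors="pt")
print(batch["input_ids"].shape)  # (batch_size, num_candidates, max_length)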
| 205 | 1 |
"""simple docstring"""
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=() , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE="no" , SCREAMING_SNAKE_CASE="29500" ):
UpperCamelCase : Any = False
UpperCamelCase : List[Any] = False
if any(key.startswith("""KAGGLE""" ) for key in os.environ.keys() ):
UpperCamelCase : List[str] = True
elif "IPython" in sys.modules:
UpperCamelCase : Any = """google.colab""" in str(sys.modules["""IPython"""].get_ipython() )
try:
UpperCamelCase : Union[str, Any] = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
f"""Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" )
if (in_colab or in_kaggle) and (os.environ.get("""TPU_NAME""" , SCREAMING_SNAKE_CASE ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside """
"""your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if num_processes is None:
UpperCamelCase : List[str] = 8
UpperCamelCase : Any = PrepareForLaunch(SCREAMING_SNAKE_CASE , distributed_type="""TPU""" )
print(f"""Launching a training on {num_processes} TPU cores.""" )
xmp.spawn(SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , nprocs=SCREAMING_SNAKE_CASE , start_method="""fork""" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on one CPU.""" )
function(*SCREAMING_SNAKE_CASE )
else:
if num_processes is None:
raise ValueError(
"""You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.""" )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized """
"""inside your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if torch.cuda.is_initialized():
raise ValueError(
"""To launch a multi-GPU training from your notebook, you need to avoid running any instruction """
"""using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA """
"""function.""" )
# torch.distributed will expect a few environment variables to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=SCREAMING_SNAKE_CASE , master_addr="""127.0.0.1""" , master_port=SCREAMING_SNAKE_CASE , mixed_precision=SCREAMING_SNAKE_CASE ):
UpperCamelCase : Optional[int] = PrepareForLaunch(SCREAMING_SNAKE_CASE , distributed_type="""MULTI_GPU""" )
print(f"""Launching training on {num_processes} GPUs.""" )
try:
start_processes(SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , nprocs=SCREAMING_SNAKE_CASE , start_method="""fork""" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"""CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. """
"""This likely stems from an outside import causing issues once the `notebook_launcher()` is called. """
"""Please review your imports and test them when running the `notebook_launcher()` to identify """
"""which one is problematic.""" ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
UpperCamelCase : List[str] = """1"""
print("""Launching training on MPS.""" )
elif torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on CPU.""" )
function(*SCREAMING_SNAKE_CASE )
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=() , SCREAMING_SNAKE_CASE=2 ):
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variables to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=SCREAMING_SNAKE_CASE , master_addr="""127.0.0.1""" , master_port="""29500""" , accelerate_mixed_precision="""no""" , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu="""yes""" , ):
UpperCamelCase : Union[str, Any] = PrepareForLaunch(SCREAMING_SNAKE_CASE , debug=SCREAMING_SNAKE_CASE )
start_processes(SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , nprocs=SCREAMING_SNAKE_CASE , start_method="""fork""" )
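# A minimal usage sketch for the launchers above (upstream these are
# accelerate's notebook_launcher and debug_launcher; the training function and
# process count below are illustrative):
from accelerate import notebook_launcher

def training_function():
    print("hello from one worker")

notebook_launcher(training_function, args=(), num_processes=2)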
| 102 |
from __future__ import annotations
lowerCamelCase__ = "#"
class lowerCAmelCase__ :
def __init__( self ) -> None:
'''simple docstring'''
_UpperCamelCase = {}
def A_ ( self , a ) -> None:
'''simple docstring'''
_UpperCamelCase = self._trie
for char in text:
if char not in trie:
_UpperCamelCase = {}
_UpperCamelCase = trie[char]
_UpperCamelCase = True
def A_ ( self , a ) -> tuple | list:
'''simple docstring'''
_UpperCamelCase = self._trie
for char in prefix:
if char in trie:
_UpperCamelCase = trie[char]
else:
return []
return self._elements(a )
def A_ ( self , a ) -> tuple:
'''simple docstring'''
_UpperCamelCase = []
for c, v in d.items():
_UpperCamelCase = [""" """] if c == END else [(c + s) for s in self._elements(a )]
result.extend(a )
return tuple(a )
lowerCamelCase__ = Trie()
lowerCamelCase__ = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
trie.insert_word(word)
def __A(lowerCAmelCase ) -> tuple:
"""simple docstring"""
_UpperCamelCase = trie.find_word(lowerCAmelCase )
return tuple(string + word for word in suffixes )
def __A() -> None:
"""simple docstring"""
print(autocomplete_using_trie("""de""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 612 | 0 |
'''simple docstring'''
edges = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
vertices = ['a', 'b', 'c', 'd', 'e']
def topological_sort(start, visited, sort) -> list:
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort('a', [], [])
    print(sort)
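# Expected behaviour of the traversal above for the module-level graph
# {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}: a DFS
# post-order emits children before their parents, so starting from 'a' the
# script prints ['c', 'd', 'e', 'b', 'a'] (a reverse topological order).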
| 710 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : Optional[Any] = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 697 | 0 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
)
lowercase_ = None
lowercase_ = {
'7B': 1_10_08,
'13B': 1_38_24,
'30B': 1_79_20,
'65B': 2_20_16,
'70B': 2_86_72,
}
lowercase_ = {
'7B': 1,
'7Bf': 1,
'13B': 2,
'13Bf': 2,
'30B': 4,
'65B': 8,
'70B': 8,
'70Bf': 8,
}
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase=1 , _UpperCAmelCase=256 ) -> List[Any]:
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
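# A worked example of the rounding rule above (upstream this helper is named
# compute_intermediate_size in the LLaMA conversion script): for LLaMA-7B,
# n = 4096 gives int(8 * 4096 / 3) = 10922, which rounds up to the next
# multiple of 256, i.e. 11008, matching the '7B' entry in the size table above.
assert 256 * ((int(8 * 4096 / 3) + 256 - 1) // 256) == 11008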
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> str:
with open(_UpperCAmelCase , 'r' ) as f:
return json.load(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
with open(_UpperCAmelCase , 'w' ) as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=True ) -> List[Any]:
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
_a = os.path.join(_UpperCAmelCase , 'tmp' )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
_a = read_json(os.path.join(_UpperCAmelCase , 'params.json' ) )
_a = NUM_SHARDS[model_size]
_a = params["n_layers"]
_a = params["n_heads"]
_a = n_heads // num_shards
_a = params["dim"]
_a = dim // n_heads
_a = 10000.0
_a = 1.0 / (base ** (torch.arange(0 , _UpperCAmelCase , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
_a = params["n_kv_heads"] # for GQA / MQA
_a = n_heads_per_shard // num_key_value_heads
_a = dim // num_key_value_heads
else: # compatibility with other checkpoints
_a = n_heads
_a = n_heads_per_shard
_a = dim
# permute for sliced rotary
def permute(w , n_heads=n_heads , dima=dim , dimb=dim ):
return w.view(n_heads , dima // n_heads // 2 , 2 , dimb ).transpose(1 , 2 ).reshape(dima , dimb )
print(f"""Fetching all parameters from the checkpoint at {input_base_path}.""" )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
_a = torch.load(os.path.join(_UpperCAmelCase , 'consolidated.00.pth' ) , map_location='cpu' )
else:
# Sharded
_a = [
torch.load(os.path.join(_UpperCAmelCase , f"""consolidated.{i:02d}.pth""" ) , map_location='cpu' )
for i in range(_UpperCAmelCase )
]
_a = 0
_a = {"weight_map": {}}
for layer_i in range(_UpperCAmelCase ):
_a = f"""pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
_a = {
f"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute(
loaded[f"""layers.{layer_i}.attention.wq.weight"""] ),
f"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute(
loaded[f"""layers.{layer_i}.attention.wk.weight"""] ),
f"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[f"""layers.{layer_i}.attention.wv.weight"""],
f"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[f"""layers.{layer_i}.attention.wo.weight"""],
f"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w1.weight"""],
f"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w2.weight"""],
f"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w3.weight"""],
f"""model.layers.{layer_i}.input_layernorm.weight""": loaded[f"""layers.{layer_i}.attention_norm.weight"""],
f"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[f"""layers.{layer_i}.ffn_norm.weight"""],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
_a = {
f"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][
f"""layers.{layer_i}.attention_norm.weight"""
].clone(),
f"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][
f"""layers.{layer_i}.ffn_norm.weight"""
].clone(),
}
_a = permute(
torch.cat(
[
loaded[i][f"""layers.{layer_i}.attention.wq.weight"""].view(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
for i in range(_UpperCAmelCase )
] , dim=0 , ).reshape(_UpperCAmelCase , _UpperCAmelCase ) )
_a = permute(
torch.cat(
[
loaded[i][f"""layers.{layer_i}.attention.wk.weight"""].view(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
for i in range(_UpperCAmelCase )
] , dim=0 , ).reshape(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
_a = torch.cat(
[
loaded[i][f"""layers.{layer_i}.attention.wv.weight"""].view(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
for i in range(_UpperCAmelCase )
] , dim=0 , ).reshape(_UpperCAmelCase , _UpperCAmelCase )
_a = torch.cat(
[loaded[i][f"""layers.{layer_i}.attention.wo.weight"""] for i in range(_UpperCAmelCase )] , dim=1 )
_a = torch.cat(
[loaded[i][f"""layers.{layer_i}.feed_forward.w1.weight"""] for i in range(_UpperCAmelCase )] , dim=0 )
_a = torch.cat(
[loaded[i][f"""layers.{layer_i}.feed_forward.w2.weight"""] for i in range(_UpperCAmelCase )] , dim=1 )
_a = torch.cat(
[loaded[i][f"""layers.{layer_i}.feed_forward.w3.weight"""] for i in range(_UpperCAmelCase )] , dim=0 )
_a = inv_freq
for k, v in state_dict.items():
_a = filename
param_count += v.numel()
torch.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )
_a = f"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
_a = {
"model.embed_tokens.weight": loaded["tok_embeddings.weight"],
"model.norm.weight": loaded["norm.weight"],
"lm_head.weight": loaded["output.weight"],
}
else:
_a = {
"model.norm.weight": loaded[0]["norm.weight"],
"model.embed_tokens.weight": torch.cat(
[loaded[i]['tok_embeddings.weight'] for i in range(_UpperCAmelCase )] , dim=1 ),
"lm_head.weight": torch.cat([loaded[i]['output.weight'] for i in range(_UpperCAmelCase )] , dim=0 ),
}
for k, v in state_dict.items():
_a = filename
param_count += v.numel()
torch.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )
# Write configs
_a = {"total_size": param_count * 2}
write_json(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'pytorch_model.bin.index.json' ) )
_a = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
_a = params["multiple_of"] if "multiple_of" in params else 256
_a = LlamaConfig(
hidden_size=_UpperCAmelCase , intermediate_size=compute_intermediate_size(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=_UpperCAmelCase , )
config.save_pretrained(_UpperCAmelCase )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('Loading the checkpoint in a Llama model.' )
_a = LlamaForCausalLM.from_pretrained(_UpperCAmelCase , torch_dtype=torch.floataa , low_cpu_mem_usage=_UpperCAmelCase )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('Saving in the Transformers format.' )
model.save_pretrained(_UpperCAmelCase , safe_serialization=_UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
# Initialize the tokenizer based on the `spm` model
_a = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f"""Saving a {tokenizer_class.__name__} to {tokenizer_path}.""" )
_a = tokenizer_class(_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( ) -> Any:
_a = argparse.ArgumentParser()
parser.add_argument(
'--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , )
parser.add_argument(
'--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , )
parser.add_argument(
'--output_dir' , help='Location to write HF model and tokenizer' , )
parser.add_argument('--safe_serialization' , type=_UpperCAmelCase , help='Whether or not to save using `safetensors`.' )
_a = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
_a = os.path.join(args.input_dir , 'tokenizer.model' )
write_tokenizer(args.output_dir , _UpperCAmelCase )
if __name__ == "__main__":
main()
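# A hedged sketch of the intended invocation for this conversion script
# (upstream: transformers' convert_llama_weights_to_hf.py; paths below are
# placeholders, the flags come from the argparse definitions above):
#
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama/weights \
#       --model_size 7B \
#       --output_dir /path/to/llama-7b-hf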
| 562 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : str = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class A ( lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase : Tuple = """ibert"""
def __init__( self : str , _UpperCamelCase : Optional[Any]=30_522 , _UpperCamelCase : List[Any]=768 , _UpperCamelCase : str=12 , _UpperCamelCase : Optional[Any]=12 , _UpperCamelCase : Tuple=3_072 , _UpperCamelCase : Dict="gelu" , _UpperCamelCase : List[Any]=0.1 , _UpperCamelCase : Dict=0.1 , _UpperCamelCase : str=512 , _UpperCamelCase : Union[str, Any]=2 , _UpperCamelCase : Tuple=0.0_2 , _UpperCamelCase : List[Any]=1e-12 , _UpperCamelCase : Any=1 , _UpperCamelCase : Optional[int]=0 , _UpperCamelCase : int=2 , _UpperCamelCase : Any="absolute" , _UpperCamelCase : Union[str, Any]=False , _UpperCamelCase : Optional[int]="none" , **_UpperCamelCase : Dict , ):
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase)
_lowercase: Dict = vocab_size
_lowercase: int = hidden_size
_lowercase: Union[str, Any] = num_hidden_layers
_lowercase: Optional[Any] = num_attention_heads
_lowercase: Tuple = hidden_act
_lowercase: str = intermediate_size
_lowercase: List[str] = hidden_dropout_prob
_lowercase: Tuple = attention_probs_dropout_prob
_lowercase: Optional[Any] = max_position_embeddings
_lowercase: Tuple = type_vocab_size
_lowercase: List[str] = initializer_range
_lowercase: Optional[int] = layer_norm_eps
_lowercase: Optional[int] = position_embedding_type
_lowercase: Any = quant_mode
_lowercase: Dict = force_dequant
class A ( lowerCamelCase_ ):
'''simple docstring'''
@property
def UpperCAmelCase__ ( self : List[str]):
if self.task == "multiple-choice":
_lowercase: List[Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
_lowercase: Optional[int] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
])
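# A minimal usage sketch for the configuration above (upstream this is
# transformers' IBertConfig; quant_mode switches the model to I-BERT's
# integer-only arithmetic, and the randomly initialised model is illustrative):
from transformers import IBertConfig, IBertModel

ibert_config = IBertConfig(quant_mode=True, force_dequant="none")
ibert_model = IBertModel(ibert_config)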
| 226 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : List[str] = logging.get_logger(__name__)
lowercase_ : List[Any] = {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class lowercase ( __lowerCAmelCase ):
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = '''speech_to_text'''
_UpperCamelCase : List[str] = ['''past_key_values''']
_UpperCamelCase : str = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Optional[Any] , lowerCamelCase_ : Tuple=1_00_00 , lowerCamelCase_ : Optional[Any]=12 , lowerCamelCase_ : Optional[Any]=20_48 , lowerCamelCase_ : Optional[Any]=4 , lowerCamelCase_ : str=6 , lowerCamelCase_ : List[Any]=20_48 , lowerCamelCase_ : Any=4 , lowerCamelCase_ : Optional[int]=0.0 , lowerCamelCase_ : Tuple=0.0 , lowerCamelCase_ : Tuple=True , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : Union[str, Any]="relu" , lowerCamelCase_ : Any=2_56 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Optional[int]=0.0 , lowerCamelCase_ : int=0.0 , lowerCamelCase_ : List[str]=0.02 , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : int=True , lowerCamelCase_ : str=1 , lowerCamelCase_ : List[Any]=0 , lowerCamelCase_ : Optional[int]=2 , lowerCamelCase_ : List[str]=60_00 , lowerCamelCase_ : Any=10_24 , lowerCamelCase_ : Tuple=2 , lowerCamelCase_ : Union[str, Any]=(5, 5) , lowerCamelCase_ : Optional[int]=10_24 , lowerCamelCase_ : Union[str, Any]=80 , lowerCamelCase_ : str=1 , **lowerCamelCase_ : Dict , ):
'''simple docstring'''
_snake_case : Any = vocab_size
_snake_case : Union[str, Any] = d_model
_snake_case : List[str] = encoder_ffn_dim
_snake_case : List[Any] = encoder_layers
_snake_case : Union[str, Any] = encoder_attention_heads
_snake_case : Any = decoder_ffn_dim
_snake_case : Dict = decoder_layers
_snake_case : Tuple = decoder_attention_heads
_snake_case : int = dropout
_snake_case : Optional[Any] = attention_dropout
_snake_case : Optional[int] = activation_dropout
_snake_case : Optional[Any] = activation_function
_snake_case : int = init_std
_snake_case : Dict = encoder_layerdrop
_snake_case : Optional[Any] = decoder_layerdrop
_snake_case : Dict = use_cache
_snake_case : Any = encoder_layers
_snake_case : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
_snake_case : int = max_source_positions
_snake_case : List[Any] = max_target_positions
_snake_case : int = num_conv_layers
_snake_case : Any = list(lowerCAmelCase_ )
_snake_case : str = conv_channels
_snake_case : str = input_feat_per_channel
_snake_case : str = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , decoder_start_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
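# A minimal usage sketch for the configuration above (upstream this is
# transformers' Speech2TextConfig; the argument values repeat the defaults
# listed in __init__ and the randomly initialised model is illustrative):
from transformers import Speech2TextConfig, Speech2TextModel

s2t_config = Speech2TextConfig(vocab_size=10000, encoder_layers=12, decoder_layers=6)
s2t_model = Speech2TextModel(s2t_config)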
| 717 |
def A__( column_title ):
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
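# A worked example of the base-26 conversion above: "AB" evaluates to
# (ord('B') - 64) * 26**0 + (ord('A') - 64) * 26**1 = 2 + 26 = 28, and
# "ZZ" evaluates to 26 + 26 * 26 = 702.
print(A__("AB"))  # 28
print(A__("ZZ"))  # 702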
| 652 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
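# A minimal usage sketch for ShapEPipeline imported above (assuming the
# upstream diffusers API; the checkpoint name, prompt, and CUDA device are
# assumptions, not taken from this file):
#
#   import torch
#   from diffusers import ShapEPipeline
#   pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16).to("cuda")
#   images = pipe("a shark", guidance_scale=15.0, num_inference_steps=64).images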
| 665 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'spiece.model'}
__magic_name__ = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
__magic_name__ = {
'google/bigbird-roberta-base': 4_096,
'google/bigbird-roberta-large': 4_096,
'google/bigbird-base-trivia-itc': 4_096,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
a_ = []
def __init__( self : Optional[int] ,_a : int ,_a : Optional[Any]="<unk>" ,_a : int="<s>" ,_a : str="</s>" ,_a : Optional[Any]="<pad>" ,_a : Tuple="[SEP]" ,_a : Tuple="[MASK]" ,_a : Union[str, Any]="[CLS]" ,_a : Optional[Dict[str, Any]] = None ,**_a : Any ,):
'''simple docstring'''
A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token
A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token
A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token
A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token
A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
A_ : List[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
A_ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a ,eos_token=_a ,unk_token=_a ,pad_token=_a ,sep_token=_a ,mask_token=_a ,cls_token=_a ,sp_model_kwargs=self.sp_model_kwargs ,**_a ,)
A_ : Optional[int] = vocab_file
A_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def _a ( self : Union[str, Any] ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Tuple = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = self.__dict__.copy()
A_ : Union[str, Any] = None
return state
def __setstate__( self : List[Any] ,_a : Any ):
'''simple docstring'''
A_ : Tuple = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
A_ : Tuple = {}
A_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self : Union[str, Any] ,_a : str ):
'''simple docstring'''
return self.sp_model.encode(_a ,out_type=_a )
def _a ( self : Optional[int] ,_a : str ):
'''simple docstring'''
return self.sp_model.piece_to_id(_a )
def _a ( self : int ,_a : Optional[int] ):
'''simple docstring'''
A_ : List[str] = self.sp_model.IdToPiece(_a )
return token
def _a ( self : Dict ,_a : int ):
'''simple docstring'''
A_ : int = []
A_ : Any = """"""
A_ : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
A_ : Dict = True
A_ : Union[str, Any] = []
else:
current_sub_tokens.append(_a )
A_ : str = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def _a ( self : int ,_a : List[int] ,_a : bool = False ,_a : bool = None ,_a : bool = True ,**_a : str ,):
'''simple docstring'''
A_ : Any = kwargs.pop("""use_source_tokenizer""" ,_a )
A_ : Union[str, Any] = self.convert_ids_to_tokens(_a ,skip_special_tokens=_a )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
A_ : str = []
A_ : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
A_ : List[str] = []
sub_texts.append(_a )
else:
current_sub_text.append(_a )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
A_ : Optional[int] = re.sub(r""" (\[(MASK|SEP)\])""" ,r"""\1""" ,""" """.join(_a ) )
else:
A_ : Tuple = """""".join(_a )
A_ : str = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
A_ : Optional[Any] = self.clean_up_tokenization(_a )
return clean_text
else:
return text
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_a )
elif not os.path.isfile(self.vocab_file ):
with open(_a ,"""wb""" ) as fi:
A_ : str = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
def _a ( self : Optional[Any] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ : List[Any] = [self.cls_token_id]
A_ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
def _a ( self : Tuple ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Tuple = [self.sep_token_id]
A_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
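# A minimal usage sketch for the SentencePiece tokenizer above (upstream this
# is transformers' BigBirdTokenizer; the checkpoint name comes from the
# pretrained map at the top of this file, and network access is assumed):
from transformers import BigBirdTokenizer

bigbird_tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
pair = bigbird_tokenizer("first segment", "second segment")
# input_ids follow the pair layout above: [CLS] ids_a [SEP] ids_b [SEP]
print(pair["input_ids"])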
| 665 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class A ( UpperCAmelCase , unittest.TestCase ):
a_ = BioGptTokenizer
a_ = False
def snake_case__ ( self : Any ) -> str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__UpperCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
__UpperCAmelCase = dict(zip(__a , range(len(__a ) ) ) )
__UpperCAmelCase = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
__UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(__a ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(__a ) )
def snake_case__ ( self : Dict , __a : Any ) -> List[Any]:
__UpperCAmelCase = '''lower newer'''
__UpperCAmelCase = '''lower newer'''
return input_text, output_text
def snake_case__ ( self : Any ) -> Tuple:
__UpperCAmelCase = BioGptTokenizer(self.vocab_file , self.merges_file )
__UpperCAmelCase = '''lower'''
__UpperCAmelCase = ['''low''', '''er</w>''']
__UpperCAmelCase = tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
__UpperCAmelCase = tokens + ['''<unk>''']
__UpperCAmelCase = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
@slow
def snake_case__ ( self : List[Any] ) -> List[str]:
__UpperCAmelCase = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
__UpperCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=__a )
__UpperCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__a )
__UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(__a )
__UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(__a , __a )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 654 |
'''simple docstring'''
import heapq
import sys
import numpy as np
__lowerCAmelCase : Any = tuple[int, int]
class A :
def __init__( self : Optional[int] ) -> int:
__UpperCAmelCase = []
__UpperCAmelCase = set()
def snake_case__ ( self : Optional[Any] ) -> List[Any]:
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''' )
def snake_case__ ( self : Dict ) -> Optional[int]:
return len(self.elements ) == 0
def snake_case__ ( self : Optional[int] , __a : Optional[Any] , __a : Dict ) -> Optional[Any]:
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(__a )
else:
# update
# print("update", item)
__UpperCAmelCase = []
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def snake_case__ ( self : int , __a : Any ) -> int:
if item in self.set:
self.set.remove(__a )
__UpperCAmelCase = []
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def snake_case__ ( self : List[str] ) -> Dict:
return self.elements[0][1]
def snake_case__ ( self : Any ) -> List[str]:
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
self.set.remove(__a )
return (priority, item)
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# euclidean distance
__UpperCAmelCase = np.array(UpperCamelCase__ )
__UpperCAmelCase = np.array(UpperCamelCase__ )
return np.linalg.norm(a - b )
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# integer division by time variable
return consistent_heuristic(UpperCamelCase__ , UpperCamelCase__ ) // t
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
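# A worked example of the heuristics above: for p = (0, 0) and goal = (3, 4),
# the euclidean (consistent) heuristic returns 5.0 while the manhattan
# heuristic returns 7; dividing the euclidean value by t keeps it cheap but
# inconsistent, which is what the multi-heuristic search below exploits.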
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : int , UpperCamelCase__ : TPos , UpperCamelCase__ : dict[TPos, float] ):
"""simple docstring"""
__UpperCAmelCase = g_function[start] + Wa * heuristics[i](UpperCamelCase__ , UpperCamelCase__ )
return ans
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
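# E.g. with n = 20: valid((0, 19)) -> True, valid((-1, 5)) -> False,
# valid((20, 0)) -> False (coordinates must lie inside the n x n grid).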
def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

                if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                    g_function[neighbours] = g_function[s] + 1
                    back_pointer[neighbours] = s
                    if neighbours not in close_list_anchor:
                        open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                        if neighbours not in close_list_inad:
                            for var in range(1, n_heuristic):
                                if key(neighbours, var, goal, g_function) <= W2 * key(
                                    neighbours, 0, goal, g_function
                                ):
                                    open_list[j].put(
                                        neighbours, key(neighbours, var, goal, g_function)
                                    )
def make_common_ground():
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        0,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 654 | 1 |
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    return vector * sigmoid(vector)
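# Hand-checked examples (illustrative comments, not doctests):
#   sigmoid(np.array([0.0]))             -> array([0.5])
#   sigmoid_linear_unit(np.array([0.0])) -> array([0.])   # x * sigmoid(x) at x = 0
# sigmoid_linear_unit is the SiLU ("swish") activation used in many modern networks.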
if __name__ == "__main__":
import doctest
doctest.testmod()
| 264 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def UpperCamelCase ( _A = "laptop" ) -> DataFrame:
lowercase : List[str] = F"""https://www.amazon.in/laptop/s?k={product}"""
lowercase : str = {
"""User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
"""Accept-Language""": """en-US, en;q=0.5""",
}
lowercase : str = BeautifulSoup(requests.get(_A , headers=_A ).text )
# Initialize a Pandas dataframe with the column titles
lowercase : Optional[Any] = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
try:
lowercase : str = item.ha.text
lowercase : Tuple = """https://www.amazon.in/""" + item.ha.a["""href"""]
lowercase : Tuple = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text
try:
lowercase : Any = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text
except AttributeError:
lowercase : Tuple = """Not available"""
try:
lowercase : Any = (
"""₹"""
+ item.find(
"""span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
)
except AttributeError:
lowercase : Any = """"""
try:
lowercase : List[str] = float(
(
(
float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
- float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
)
/ float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
)
* 100 )
except ValueError:
lowercase : Tuple = float("""nan""" )
except AttributeError:
pass
lowercase : Union[str, Any] = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
lowercase : Tuple = """ """
lowercase : List[str] = """ """
data_frame.index += 1
return data_frame
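# Note: the two ".loc" assignments above blank out rows where the scraped price
# exceeds the MRP, which usually indicates a parsing mix-up rather than real data.
# Amazon's markup changes frequently, so the CSS classes used here may need updating.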
if __name__ == "__main__":
    product = "headphones"
get_amazon_product_data(product).to_csv(F'Amazon Product Data for {product}.csv')
| 264 | 1 |
'''simple docstring'''
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
    class DummyDataset(Dataset):
        def __init__(self, length: int = 101):
            self.length = length

        def __len__(self):
            return self.length

        def __getitem__(self, i) -> int:
            return i

    class DummyDataCollator:
        def __call__(self, features):
            return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
    class DummyModel(nn.Module):
        def __init__(self):
            super().__init__()
            # Add some (unused) params otherwise DDP will complain.
            self.fc = nn.Linear(120, 80)

        def forward(self, input_ids, labels=None):
            if labels is not None:
                return torch.tensor(0.0, device=input_ids.device), input_ids
            else:
                return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 654 | '''simple docstring'''
def counting_sort(collection):
    """Sort a collection of integers with counting sort (stable)."""
    # if the collection is empty, returns empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    """Sort the characters of a string via counting sort on their code points."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
| 654 | 1 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = os.path.join(args.tf_model_dir , "parameters.json" )
snake_case_ = json.loads(open(_A ).read() )
if not params:
raise ValueError(
f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file." )
if not args.output.endswith(".pt" ):
snake_case_ = args.output + ".pt"
snake_case_ = OrderedDict()
with tf.device("/CPU:0" ):
snake_case_ = tf.train.load_checkpoint(args.tf_model_dir )
snake_case_ = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
snake_case_ = reader.get_tensor(_A ).astype(np.floataa )
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
snake_case_ = int(key_name[9] )
elif key_name.startswith("pasts/out" ):
snake_case_ = 8
snake_case_ = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
snake_case_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ = torch.tensor(_A )
elif key_name.startswith("model/moe" ):
snake_case_ = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
snake_case_ = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
snake_case_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ = torch.tensor(_A )
elif key_name.endswith("/softmlp/kernel" ):
snake_case_ = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
snake_case_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ = torch.tensor(_A )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
snake_case_ = key_name[-9:-7]
for i in range(16 ):
snake_case_ = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
snake_case_ = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
snake_case_ = torch.tensor(_A )
elif key_name.startswith("model/mlp" ):
snake_case_ = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
snake_case_ = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
snake_case_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ = torch.tensor(_A )
elif key_name.endswith("/p1/bias" ):
snake_case_ = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
snake_case_ = vnp.copy() # same because it is one dimensional
snake_case_ = torch.tensor(_A )
elif key_name.endswith("/p2/kernel" ):
snake_case_ = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
snake_case_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ = torch.tensor(_A )
elif key_name.endswith("/p2/bias" ):
snake_case_ = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
snake_case_ = vnp.copy() # same because it is one dimensional
snake_case_ = torch.tensor(_A )
elif key_name.startswith("model/ln" ):
snake_case_ = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
snake_case_ = "model.blocks.%d.feed_forward.norm.bias" % player
snake_case_ = vnp.copy() # same because it is one dimensional
snake_case_ = torch.tensor(_A )
elif key_name.endswith("/g" ):
snake_case_ = "model.blocks.%d.feed_forward.norm.weight" % player
snake_case_ = vnp.copy() # same because it is one dimensional
snake_case_ = torch.tensor(_A )
elif key_name.startswith("model/att" ):
snake_case_ = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
snake_case_ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
snake_case_ = state[:, 0, :, :]
snake_case_ = state[:, 1, :, :]
snake_case_ = state[:, 2, :, :]
snake_case_ = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
snake_case_ = torch.tensor(_A )
snake_case_ = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
snake_case_ = torch.tensor(_A )
snake_case_ = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
snake_case_ = torch.tensor(_A )
elif key_name.endswith("/o/kernel" ):
snake_case_ = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
snake_case_ = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ = torch.tensor(_A )
elif key_name.startswith("model/an" ):
snake_case_ = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
snake_case_ = "model.blocks.%d.self_attn.norm.bias" % player
snake_case_ = vnp.copy() # same because it is one dimensional
snake_case_ = torch.tensor(_A )
elif key_name.endswith("/g" ):
snake_case_ = "model.blocks.%d.self_attn.norm.weight" % player
snake_case_ = vnp.copy() # same because it is one dimensional
snake_case_ = torch.tensor(_A )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
snake_case_ = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
snake_case_ = "model.%s.weight" % nlayer
snake_case_ = vnp.copy() # same in embedded
snake_case_ = torch.tensor(_A )
if key_name.startswith("model/wte" ):
snake_case_ = "lm_head.weight"
snake_case_ = vnp.copy() # same in embedded
snake_case_ = torch.tensor(_A )
elif key_name.startswith("model/wob" ):
snake_case_ = "final_logits_bias"
snake_case_ = vnp.copy() # same in embedded
snake_case_ = state.reshape((1, -1) )
snake_case_ = torch.tensor(_A )
elif key_name == "model/dense/kernel":
snake_case_ = "model.last_project.weight"
snake_case_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ = torch.tensor(_A )
elif key_name == "model/dense_1/bias":
snake_case_ = "model.last_project.bias"
snake_case_ = vnp.copy() # same because it is one dimensional
snake_case_ = torch.tensor(_A )
torch.save(_A , args.output )
if __name__ == "__main__":
lowercase__ : Optional[Any] = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
lowercase__ : Any = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 376 |
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implement the hyperbolic tangent activation: (2 / (1 + e^(-2x))) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
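# (2 / (1 + e^(-2x))) - 1 is algebraically identical to np.tanh(x); e.g.
#   tangent_hyperbolic(np.array([0.0])) -> array([0.])
#   tangent_hyperbolic(np.array([1.0])) -> array([0.76159416])   (hand-checked)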
if __name__ == "__main__":
import doctest
doctest.testmod()
| 376 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Tuple = logging.get_logger(__name__)
_A : List[str] = {
"""BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class a__ ( _UpperCamelCase ):
__lowerCAmelCase = """altclip_text_model"""
def __init__( self , _a=250_002 , _a=1_024 , _a=24 , _a=16 , _a=4_096 , _a="gelu" , _a=0.1 , _a=0.1 , _a=514 , _a=1 , _a=0.0_2 , _a=0.0_2 , _a=1E-05 , _a=1 , _a=0 , _a=2 , _a="absolute" , _a=True , _a=768 , **_a , ):
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
lowercase : Optional[int] = vocab_size
lowercase : Optional[Any] = hidden_size
lowercase : Dict = num_hidden_layers
lowercase : Tuple = num_attention_heads
lowercase : Union[str, Any] = hidden_act
lowercase : Union[str, Any] = intermediate_size
lowercase : Optional[Any] = hidden_dropout_prob
lowercase : Tuple = attention_probs_dropout_prob
lowercase : List[str] = max_position_embeddings
lowercase : Tuple = type_vocab_size
lowercase : Any = initializer_range
lowercase : Optional[int] = initializer_factor
lowercase : Any = layer_norm_eps
lowercase : int = position_embedding_type
lowercase : List[Any] = use_cache
lowercase : Tuple = project_dim
class a__ ( _UpperCamelCase ):
__lowerCAmelCase = """altclip_vision_model"""
def __init__( self , _a=768 , _a=3_072 , _a=512 , _a=12 , _a=12 , _a=3 , _a=224 , _a=32 , _a="quick_gelu" , _a=1E-5 , _a=0.0 , _a=0.0_2 , _a=1.0 , **_a , ):
super().__init__(**_UpperCAmelCase )
lowercase : Union[str, Any] = hidden_size
lowercase : int = intermediate_size
lowercase : Optional[int] = projection_dim
lowercase : List[Any] = num_hidden_layers
lowercase : Optional[Any] = num_attention_heads
lowercase : int = num_channels
lowercase : List[Any] = patch_size
lowercase : Union[str, Any] = image_size
lowercase : Dict = initializer_range
lowercase : Any = initializer_factor
lowercase : Union[str, Any] = attention_dropout
lowercase : List[str] = layer_norm_eps
lowercase : Optional[int] = hidden_act
@classmethod
def __magic_name__ ( cls , _a , **_a ):
cls._set_token_in_kwargs(_UpperCAmelCase )
lowercase : Any = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get("model_type" ) == "altclip":
lowercase : Any = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class a__ ( _UpperCamelCase ):
__lowerCAmelCase = """altclip"""
__lowerCAmelCase = True
def __init__( self , _a=None , _a=None , _a=768 , _a=2.6_5_9_2 , **_a ):
# If `_config_dict` exist, we use them for the backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
lowercase : List[Any] = kwargs.pop("text_config_dict" , _UpperCAmelCase )
lowercase : str = kwargs.pop("vision_config_dict" , _UpperCAmelCase )
super().__init__(**_UpperCAmelCase )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
lowercase : int = {}
# This is the complete result when using `text_config_dict`.
lowercase : str = AltCLIPTextConfig(**_UpperCAmelCase ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
lowercase : Optional[Any] = (
f"""`{key}` is found in both `text_config_dict` and `text_config` but with different values. """
f"""The value `text_config_dict[\"{key}\"]` will be used instead."""
)
# If inferred from default argument values (just to be super careful)
else:
lowercase : int = (
f"""`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The """
f"""value `text_config[\"{key}\"]` will be overriden."""
)
logger.warning(_UpperCAmelCase )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
lowercase : Tuple = {}
# This is the complete result when using `vision_config_dict`.
lowercase : Optional[Any] = AltCLIPVisionConfig(**_UpperCAmelCase ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
lowercase : Optional[int] = {
str(_UpperCAmelCase ): value for key, value in _vision_config_dict['''id2label'''].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
lowercase : str = (
f"""`{key}` is found in both `vision_config_dict` and `vision_config` but with different """
f"""values. The value `vision_config_dict[\"{key}\"]` will be used instead."""
)
# If inferred from default argument values (just to be super careful)
else:
lowercase : Optional[Any] = (
f"""`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. """
f"""The value `vision_config[\"{key}\"]` will be overriden."""
)
logger.warning(_UpperCAmelCase )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
lowercase : Dict = {}
logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values." )
if vision_config is None:
lowercase : Any = {}
logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values." )
lowercase : Optional[Any] = AltCLIPTextConfig(**_UpperCAmelCase )
lowercase : Dict = AltCLIPVisionConfig(**_UpperCAmelCase )
lowercase : Dict = projection_dim
lowercase : int = logit_scale_init_value
lowercase : int = 1.0
@classmethod
def __magic_name__ ( cls , _a , _a , **_a ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_UpperCAmelCase )
def __magic_name__ ( self ):
lowercase : Dict = copy.deepcopy(self.__dict__ )
lowercase : int = self.text_config.to_dict()
lowercase : List[str] = self.vision_config.to_dict()
lowercase : List[str] = self.__class__.model_type
return output
| 713 |
"""simple docstring"""
# flake8: noqa
# Lint as: python3
_A : Dict = [
"""VerificationMode""",
"""Version""",
"""disable_progress_bar""",
"""enable_progress_bar""",
"""is_progress_bar_enabled""",
"""experimental""",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 518 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
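# Sanity note (hand-computed): with the defaults above, embed_dim=96 and
# len(depths)=4, so SwinConfig().hidden_size == int(96 * 2 ** 3) == 768 --
# the channel dimension after the last stage of the swin-tiny checkpoint
# referenced in SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP.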
| 112 |
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(4_2)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we can check for now is that the process didn't fail
        pass

    def run_and_check(
        self,
        stage: str,
        model: str,
        eval_steps: int = 10,
        distributed: bool = True,
        quality_checks: bool = True,
        fp16: bool = True,
    ):
        model_name = models[model]

        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )

        self.do_checks(output_dir)

        return output_dir

    def run_trainer(
        self,
        stage: str,
        model_name: str,
        eval_steps: int = 10,
        num_train_epochs: int = 1,
        distributed: bool = True,
        fp16: bool = True,
    ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        # cap the distributed runs at 2 gpus; a single gpu otherwise
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
| 112 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase_ = logging.get_logger(__name__)
def make_batched(videos) -> list:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ['''pixel_values''']
def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BILINEAR , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = 1 / 2_55 , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = None , **lowerCamelCase , ) -> None:
'''simple docstring'''
super().__init__(**lowerCamelCase )
UpperCamelCase : Union[str, Any] = size if size is not None else {"shortest_edge": 2_24}
UpperCamelCase : Tuple = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
UpperCamelCase : Tuple = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCamelCase : Tuple = get_size_dict(lowerCamelCase , param_name="crop_size" )
UpperCamelCase : Tuple = do_resize
UpperCamelCase : Any = size
UpperCamelCase : str = do_center_crop
UpperCamelCase : Union[str, Any] = crop_size
UpperCamelCase : Dict = resample
UpperCamelCase : Dict = do_rescale
UpperCamelCase : Any = rescale_factor
UpperCamelCase : Optional[int] = do_normalize
UpperCamelCase : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase : str = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = PILImageResampling.BILINEAR , lowerCamelCase = None , **lowerCamelCase , ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase : List[Any] = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
if "shortest_edge" in size:
UpperCamelCase : Union[str, Any] = get_resize_output_image_size(lowerCamelCase , size["shortest_edge"] , default_to_square=lowerCamelCase )
elif "height" in size and "width" in size:
UpperCamelCase : Union[str, Any] = (size["height"], size["width"])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase : List[Any] = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(lowerCamelCase , size=(size["height"], size["width"]) , data_format=lowerCamelCase , **lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ) -> List[str]:
'''simple docstring'''
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , ) -> np.ndarray:
'''simple docstring'''
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCamelCase : Union[str, Any] = to_numpy_array(lowerCamelCase )
if do_resize:
UpperCamelCase : Tuple = self.resize(image=lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase )
if do_center_crop:
UpperCamelCase : Tuple = self.center_crop(lowerCamelCase , size=lowerCamelCase )
if do_rescale:
UpperCamelCase : Union[str, Any] = self.rescale(image=lowerCamelCase , scale=lowerCamelCase )
if do_normalize:
UpperCamelCase : List[str] = self.normalize(image=lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase )
UpperCamelCase : Any = to_channel_dimension_format(lowerCamelCase , lowerCamelCase )
return image
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ) -> PIL.Image.Image:
'''simple docstring'''
UpperCamelCase : List[Any] = do_resize if do_resize is not None else self.do_resize
UpperCamelCase : int = resample if resample is not None else self.resample
UpperCamelCase : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase : Optional[Any] = image_mean if image_mean is not None else self.image_mean
UpperCamelCase : Dict = image_std if image_std is not None else self.image_std
UpperCamelCase : Tuple = size if size is not None else self.size
UpperCamelCase : Tuple = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
UpperCamelCase : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
UpperCamelCase : Union[str, Any] = get_size_dict(lowerCamelCase , param_name="crop_size" )
if not valid_images(lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCamelCase : Union[str, Any] = make_batched(lowerCamelCase )
UpperCamelCase : Any = [
[
self._preprocess_image(
image=lowerCamelCase , do_resize=lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase , do_center_crop=lowerCamelCase , crop_size=lowerCamelCase , do_rescale=lowerCamelCase , rescale_factor=lowerCamelCase , do_normalize=lowerCamelCase , image_mean=lowerCamelCase , image_std=lowerCamelCase , data_format=lowerCamelCase , )
for img in video
]
for video in videos
]
UpperCamelCase : Optional[int] = {"pixel_values": videos}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
| 435 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {
'configuration_roberta_prelayernorm': [
'ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP',
'RobertaPreLayerNormConfig',
'RobertaPreLayerNormOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaPreLayerNormForCausalLM',
'RobertaPreLayerNormForMaskedLM',
'RobertaPreLayerNormForMultipleChoice',
'RobertaPreLayerNormForQuestionAnswering',
'RobertaPreLayerNormForSequenceClassification',
'RobertaPreLayerNormForTokenClassification',
'RobertaPreLayerNormModel',
'RobertaPreLayerNormPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaPreLayerNormForCausalLM',
'TFRobertaPreLayerNormForMaskedLM',
'TFRobertaPreLayerNormForMultipleChoice',
'TFRobertaPreLayerNormForQuestionAnswering',
'TFRobertaPreLayerNormForSequenceClassification',
'TFRobertaPreLayerNormForTokenClassification',
'TFRobertaPreLayerNormMainLayer',
'TFRobertaPreLayerNormModel',
'TFRobertaPreLayerNormPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'FlaxRobertaPreLayerNormForCausalLM',
'FlaxRobertaPreLayerNormForMaskedLM',
'FlaxRobertaPreLayerNormForMultipleChoice',
'FlaxRobertaPreLayerNormForQuestionAnswering',
'FlaxRobertaPreLayerNormForSequenceClassification',
'FlaxRobertaPreLayerNormForTokenClassification',
'FlaxRobertaPreLayerNormModel',
'FlaxRobertaPreLayerNormPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 435 | 1 |
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]
def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral to an integer."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert an integer to a Roman numeral."""
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 112 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__)
class __lowerCAmelCase ( _UpperCamelCase ):
_UpperCamelCase : Tuple = ["""input_values""", """attention_mask"""]
def __init__( self , snake_case = 1 , snake_case = 16_000 , snake_case = 0.0 , snake_case = False , snake_case = 80 , snake_case = 16 , snake_case = 64 , snake_case = "hann_window" , snake_case = 1.0 , snake_case = 80 , snake_case = 7_600 , snake_case = 1E-10 , snake_case = 2 , snake_case = True , **snake_case , ) -> Dict:
"""simple docstring"""
super().__init__(feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , **snake_case )
a__ : Any = do_normalize
a__ : List[str] = return_attention_mask
a__ : List[Any] = num_mel_bins
a__ : List[str] = hop_length
a__ : int = win_length
a__ : List[Any] = win_function
a__ : List[str] = frame_signal_scale
a__ : List[Any] = fmin
a__ : Optional[Any] = fmax
a__ : Union[str, Any] = mel_floor
a__ : Union[str, Any] = reduction_factor
a__ : List[str] = win_length * sampling_rate // 1_000
a__ : List[Any] = hop_length * sampling_rate // 1_000
a__ : List[Any] = optimal_fft_length(self.sample_size )
a__ : Dict = (self.n_fft // 2) + 1
a__ : str = window_function(window_length=self.sample_size , name=self.win_function , periodic=snake_case )
a__ : Tuple = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="slaney" , mel_scale="slaney" , )
if frame_signal_scale != 1.0:
warnings.warn(
"The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers" , snake_case , )
if reduction_factor != 2.0:
warnings.warn(
"The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers" , snake_case , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _snake_case ( snake_case , snake_case , snake_case = 0.0 ) -> List[np.ndarray]:
"""simple docstring"""
if attention_mask is not None:
a__ : Tuple = np.array(snake_case , np.intaa )
a__ : List[str] = []
for vector, length in zip(snake_case , attention_mask.sum(-1 ) ):
a__ : List[Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
a__ : List[str] = padding_value
normed_input_values.append(snake_case )
else:
a__ : Any = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def _snake_case ( self , snake_case , ) -> np.ndarray:
"""simple docstring"""
a__ : str = spectrogram(
snake_case , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="log10" , )
return log_mel_spec.T
def __call__( self , snake_case = None , snake_case = None , snake_case = False , snake_case = None , snake_case = False , snake_case = None , snake_case = None , snake_case = None , snake_case = None , **snake_case , ) -> BatchFeature:
"""simple docstring"""
if audio is None and audio_target is None:
raise ValueError("You must provide either `audio` or `audio_target` values." )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
F""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
F""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
if audio is not None:
a__ : Dict = self._process_audio(
snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , **snake_case , )
else:
a__ : Optional[int] = None
if audio_target is not None:
a__ : List[Any] = self._process_audio(
snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , **snake_case , )
if inputs is None:
return inputs_target
else:
a__ : Tuple = inputs_target["input_values"]
a__ : Tuple = inputs_target.get("attention_mask" )
if decoder_attention_mask is not None:
a__ : Tuple = decoder_attention_mask
return inputs
def _snake_case ( self , snake_case , snake_case = False , snake_case = False , snake_case = None , snake_case = False , snake_case = None , snake_case = None , snake_case = None , **snake_case , ) -> BatchFeature:
"""simple docstring"""
a__ : Optional[int] = isinstance(snake_case , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
a__ : List[Any] = is_batched_numpy or (
isinstance(snake_case , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
a__ : int = [np.asarray(snake_case , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(snake_case , np.ndarray ):
a__ : Any = np.asarray(snake_case , dtype=np.floataa )
elif isinstance(snake_case , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
a__ : List[Any] = speech.astype(np.floataa )
# always return batch
if not is_batched:
a__ : Union[str, Any] = [speech]
# needed to make pad() work on spectrogram inputs
a__ : Optional[Any] = self.feature_size
# convert into correct format for padding
if is_target:
a__ : List[str] = [self._extract_mel_features(snake_case ) for waveform in speech]
a__ : Optional[Any] = BatchFeature({"input_values": features} )
a__ : str = self.num_mel_bins
else:
a__ : int = BatchFeature({"input_values": speech} )
a__ : int = self.pad(
snake_case , padding=snake_case , max_length=snake_case , truncation=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , **snake_case , )
a__ : Any = feature_size_hack
# convert input values to correct format
a__ : Tuple = padded_inputs["input_values"]
if not isinstance(input_values[0] , np.ndarray ):
a__ : int = [np.asarray(snake_case , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(snake_case , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
a__ : Union[str, Any] = [array.astype(np.floataa ) for array in input_values]
elif isinstance(snake_case , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
a__ : Optional[int] = input_values.astype(np.floataa )
# convert attention_mask to correct format
a__ : Optional[int] = padded_inputs.get("attention_mask" )
if attention_mask is not None:
a__ : Tuple = [np.asarray(snake_case , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
a__ : Any = (
attention_mask
if self._get_padding_strategies(snake_case , max_length=snake_case ) is not PaddingStrategy.DO_NOT_PAD
else None
)
a__ : Optional[Any] = self.zero_mean_unit_var_norm(
padded_inputs["input_values"] , attention_mask=snake_case , padding_value=self.padding_value )
if return_tensors is not None:
a__ : int = padded_inputs.convert_to_tensors(snake_case )
return padded_inputs
def _snake_case ( self ) -> Dict[str, Any]:
"""simple docstring"""
a__ : int = super().to_dict()
# Don't serialize these as they are derived from the other properties.
a__ : str = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
for name in names:
if name in output:
del output[name]
return output
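# Illustration (not part of the extractor above): the "zero-mean and unit-variance
# normalization" step computes each waveform's statistics over its non-padded
# samples only, then resets the padded tail. A minimal standalone sketch, assuming
# an attention mask of 1s for real samples and 0s for padding; the helper name
# mirrors the method called above, but this body is an assumption, not the
# library implementation.
import numpy as np

def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
    normed = []
    for vector, mask in zip(input_values, attention_mask):
        length = int(mask.sum())
        real_part = vector[:length]  # statistics ignore the padding
        out = (vector - real_part.mean()) / np.sqrt(real_part.var() + 1e-7)
        out[length:] = padding_value  # keep the padded tail at padding_value
        normed.append(out)
    return normed

print(zero_mean_unit_var_norm([np.array([1.0, 2.0, 3.0, 0.0], dtype=np.float32)],
                              [np.array([1, 1, 1, 0], dtype=np.int32)])[0])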
| 112 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A = logging.get_logger(__name__)
A = {
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ):
"""simple docstring"""
__A = """bit"""
__A = ["""preactivation""", """bottleneck"""]
__A = ["""SAME""", """VALID"""]
def __init__( self , __UpperCamelCase=3 , __UpperCamelCase=64 , __UpperCamelCase=[2_56, 5_12, 10_24, 20_48] , __UpperCamelCase=[3, 4, 6, 3] , __UpperCamelCase="preactivation" , __UpperCamelCase="relu" , __UpperCamelCase=None , __UpperCamelCase=32 , __UpperCamelCase=0.0 , __UpperCamelCase=False , __UpperCamelCase=32 , __UpperCamelCase=1 , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase , ):
"""simple docstring"""
super().__init__(**__UpperCamelCase )
if layer_type not in self.layer_types:
raise ValueError(f"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
snake_case_ = global_padding.upper()
else:
raise ValueError(f"""Padding strategy {global_padding} not supported""" )
snake_case_ = num_channels
snake_case_ = embedding_size
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = layer_type
snake_case_ = hidden_act
snake_case_ = global_padding
snake_case_ = num_groups
snake_case_ = drop_path_rate
snake_case_ = embedding_dynamic_padding
snake_case_ = output_stride
snake_case_ = width_factor
snake_case_ = ['stem'] + [f"""stage{idx}""" for idx in range(1 , len(__UpperCamelCase ) + 1 )]
snake_case_ , snake_case_ = get_aligned_output_features_output_indices(
out_features=__UpperCamelCase , out_indices=__UpperCamelCase , stage_names=self.stage_names )
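# Illustration: `get_aligned_output_features_output_indices` reconciles the two
# ways of selecting backbone stages against the `stage_names` list built above.
# A minimal sketch of the idea (this is an assumption about its behavior, not
# the library helper itself):
def align_out_features(out_features, out_indices, stage_names):
    if out_features is None and out_indices is None:
        out_features = [stage_names[-1]]  # default to the deepest stage
    if out_features is None:
        out_features = [stage_names[i] for i in out_indices]
    if out_indices is None:
        out_indices = [stage_names.index(name) for name in out_features]
    return out_features, out_indices

stages = ["stem"] + [f"stage{idx}" for idx in range(1, 5)]
print(align_out_features(["stage3"], None, stages))  # (['stage3'], [3])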
| 701 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=32 , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=16 , __UpperCamelCase=[1, 2, 1] , __UpperCamelCase=[2, 2, 4] , __UpperCamelCase=2 , __UpperCamelCase=2.0 , __UpperCamelCase=True , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.1 , __UpperCamelCase="gelu" , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=0.02 , __UpperCamelCase=1E-5 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=10 , __UpperCamelCase=8 , ):
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = embed_dim
snake_case_ = depths
snake_case_ = num_heads
snake_case_ = window_size
snake_case_ = mlp_ratio
snake_case_ = qkv_bias
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = drop_path_rate
snake_case_ = hidden_act
snake_case_ = use_absolute_embeddings
snake_case_ = patch_norm
snake_case_ = layer_norm_eps
snake_case_ = initializer_range
snake_case_ = is_training
snake_case_ = scope
snake_case_ = use_labels
snake_case_ = type_sequence_label_size
snake_case_ = encoder_stride
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ):
"""simple docstring"""
return SwinvaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = SwinvaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case_ = model(__UpperCamelCase )
snake_case_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
snake_case_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = SwinvaForMaskedImageModeling(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case_ = model(__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
snake_case_ = 1
snake_case_ = SwinvaForMaskedImageModeling(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = self.type_sequence_label_size
snake_case_ = SwinvaForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case_ = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
"""simple docstring"""
__A = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
__A = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
__A = False
__A = False
__A = False
__A = False
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = SwinvaModelTester(self )
snake_case_ = ConfigTester(self , config_class=__UpperCamelCase , embed_dim=37 )
def __lowerCAmelCase ( self ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def __lowerCAmelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def __lowerCAmelCase ( self ):
"""simple docstring"""
pass
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(__UpperCamelCase )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = True
for model_class in self.all_model_classes:
snake_case_ = True
snake_case_ = False
snake_case_ = True
snake_case_ = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
snake_case_ = outputs.attentions
snake_case_ = len(self.model_tester.depths )
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case_ = True
snake_case_ = config.window_size**2
snake_case_ = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
snake_case_ = outputs.attentions
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
snake_case_ = len(__UpperCamelCase )
# Check attention is always last and order is fine
snake_case_ = True
snake_case_ = True
snake_case_ = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
if hasattr(self.model_tester , 'num_hidden_states_types' ):
snake_case_ = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
snake_case_ = 2
self.assertEqual(out_len + added_hidden_states , len(__UpperCamelCase ) )
snake_case_ = outputs.attentions
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
snake_case_ = outputs.hidden_states
snake_case_ = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
# Swinv2 has a different seq_length
snake_case_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
snake_case_ = outputs.reshaped_hidden_states
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
snake_case_ , snake_case_ , snake_case_ , snake_case_ = reshaped_hidden_states[0].shape
snake_case_ = (
reshaped_hidden_states[0].view(__UpperCamelCase , __UpperCamelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
snake_case_ = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = 3
snake_case_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
snake_case_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
snake_case_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
snake_case_ = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , (padded_height, padded_width) )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = SwinvaModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
snake_case_ = model_class(config=__UpperCamelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowerCAmelCase ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
__UpperCamelCase )
snake_case_ = self.default_image_processor
snake_case_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
snake_case_ = image_processor(images=__UpperCamelCase , return_tensors='pt' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
snake_case_ = model(**__UpperCamelCase )
# verify the logits
snake_case_ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
snake_case_ = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
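# Worked arithmetic behind the shape assertions in the tester above: Swin merges
# 2x2 patch groups after every stage except the last, quartering the sequence
# length and doubling the hidden size per merge. With the tester defaults
# (image_size=32, patch_size=2, embed_dim=16, depths=[1, 2, 1]):
image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]
num_patches = (image_size // patch_size) ** 2                 # 16 * 16 = 256
expected_seq_len = num_patches // (4 ** (len(depths) - 1))    # 256 // 16 = 16
expected_dim = embed_dim * 2 ** (len(depths) - 1)             # 16 * 4 = 64
print(expected_seq_len, expected_dim)  # 16 64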
| 46 | 0 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def A ( __UpperCamelCase , __UpperCamelCase=7 ) -> Optional[int]:
A__ = None
if token is not None:
A__ = {'Accept': 'application/vnd.github+json', 'Authorization': f'''Bearer {token}'''}
# The id of a workflow (not of a workflow run)
A__ = '636036'
A__ = f'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += f'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
A__ = requests.get(__UpperCamelCase , headers=__UpperCamelCase ).json()
return result["workflow_runs"]
def A ( __UpperCamelCase ) -> Optional[int]:
A__ = get_daily_ci_runs(__UpperCamelCase )
A__ = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
A__ = workflow_run['id']
break
return workflow_run_id
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> str:
A__ = get_last_daily_ci_runs(__UpperCamelCase )
if workflow_run_id is not None:
A__ = get_artifacts_links(worflow_run_id=__UpperCamelCase , token=__UpperCamelCase )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
A__ = artifacts_links[artifact_name]
download_artifact(
artifact_name=__UpperCamelCase , artifact_url=__UpperCamelCase , output_dir=__UpperCamelCase , token=__UpperCamelCase )
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
get_last_daily_ci_artifacts(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
A__ = {}
for artifact_name in artifact_names:
A__ = os.path.join(__UpperCamelCase , f'''{artifact_name}.zip''' )
if os.path.isfile(__UpperCamelCase ):
A__ = {}
with zipfile.ZipFile(__UpperCamelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(__UpperCamelCase ):
# read the file
with z.open(__UpperCamelCase ) as f:
A__ = f.read().decode('UTF-8' )
return results
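# Illustration of the artifact-reading pattern above: each downloaded artifact is
# a zip archive whose member files are decoded into a dict of strings. A minimal
# standalone sketch (the original's os.path.isdir check is approximated here by
# skipping directory entries; the archive path is a placeholder):
import zipfile

def read_artifact(zip_path):
    contents = {}
    with zipfile.ZipFile(zip_path) as z:
        for filename in z.namelist():
            if not filename.endswith("/"):  # skip directory entries
                with z.open(filename) as f:
                    contents[filename] = f.read().decode("UTF-8")
    return contents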
| 9 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 620 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''sail/poolformer_s12''': '''https://huggingface.co/sail/poolformer_s12/resolve/main/config.json''',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class __snake_case ( _lowercase):
snake_case__ : str = "poolformer"
def __init__( self : Union[str, Any] , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : List[Any]=1_6 , __lowerCAmelCase : Optional[Any]=1_6 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : Optional[Any]=4.0 , __lowerCAmelCase : str=[2, 2, 6, 2] , __lowerCAmelCase : Any=[6_4, 1_2_8, 3_2_0, 5_1_2] , __lowerCAmelCase : Union[str, Any]=[7, 3, 3, 3] , __lowerCAmelCase : List[str]=[4, 2, 2, 2] , __lowerCAmelCase : Optional[int]=[2, 1, 1, 1] , __lowerCAmelCase : List[Any]=4 , __lowerCAmelCase : Optional[Any]=0.0 , __lowerCAmelCase : int="gelu" , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Any=1E-5 , __lowerCAmelCase : int=0.02 , **__lowerCAmelCase : Any , ):
"""simple docstring"""
_lowerCamelCase : Tuple = num_channels
_lowerCamelCase : Tuple = patch_size
_lowerCamelCase : Optional[Any] = stride
_lowerCamelCase : Union[str, Any] = padding
_lowerCamelCase : Any = pool_size
_lowerCamelCase : int = hidden_sizes
_lowerCamelCase : str = mlp_ratio
_lowerCamelCase : Optional[Any] = depths
_lowerCamelCase : Union[str, Any] = patch_sizes
_lowerCamelCase : List[str] = strides
_lowerCamelCase : Optional[Any] = num_encoder_blocks
_lowerCamelCase : List[str] = drop_path_rate
_lowerCamelCase : str = hidden_act
_lowerCamelCase : Optional[int] = use_layer_scale
_lowerCamelCase : Dict = layer_scale_init_value
_lowerCamelCase : Tuple = initializer_range
super().__init__(**__lowerCAmelCase )
class __snake_case ( _lowercase):
snake_case__ : int = version.parse("1.11")
@property
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return 2E-3
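# For reference, the `inputs` property above declares a single ONNX graph input,
# "pixel_values", with all four axes marked dynamic. Equivalent literal value:
from collections import OrderedDict

expected_onnx_inputs = OrderedDict(
    [("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"})]
)
print(list(expected_onnx_inputs))            # ['pixel_values']
print(expected_onnx_inputs["pixel_values"])  # axis index -> symbolic dimension name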
| 716 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def snake_case_ ( A_ : Dict, A_ : Dict=False ):
'''simple docstring'''
_lowerCamelCase : List[str] = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
        # if just the base model, strip the "vit." prefix from all keys that start with "vit"
_lowerCamelCase : int = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def snake_case_ ( A_ : Union[str, Any], A_ : Optional[Any], A_ : int=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCamelCase : Optional[int] = ''''''
else:
_lowerCamelCase : str = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase : List[str] = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
_lowerCamelCase : Optional[Any] = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
_lowerCamelCase : List[str] = in_proj_bias[: config.hidden_size]
_lowerCamelCase : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase : Any = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase : Tuple = in_proj_bias[-config.hidden_size :]
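# Illustration of the slicing above: timm stores the attention projections as one
# fused (3 * hidden_size, hidden_size) matrix, which the converter splits into
# query/key/value blocks in that order. A minimal sketch of the same slicing
# (hypothetical helper, not used by the script):
def split_qkv(in_proj_weight, in_proj_bias, hidden_size):
    query = (in_proj_weight[:hidden_size, :], in_proj_bias[:hidden_size])
    key = (in_proj_weight[hidden_size : hidden_size * 2, :], in_proj_bias[hidden_size : hidden_size * 2])
    value = (in_proj_weight[-hidden_size:, :], in_proj_bias[-hidden_size:])
    return query, key, value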
def snake_case_ ( A_ : Tuple ):
'''simple docstring'''
_lowerCamelCase : str = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(A_, A_ )
def snake_case_ ( A_ : int, A_ : Any, A_ : Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : str = dct.pop(A_ )
_lowerCamelCase : List[Any] = val
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_lowerCamelCase : Any = Image.open(requests.get(A_, stream=A_ ).raw )
return im
@torch.no_grad()
def snake_case_ ( A_ : Dict, A_ : Optional[Any], A_ : str=False ):
'''simple docstring'''
_lowerCamelCase : List[str] = BitConfig(
global_padding='''same''', layer_type='''bottleneck''', depths=(3, 4, 9), out_features=['''stage3'''], embedding_dynamic_padding=A_, )
_lowerCamelCase : Any = ViTHybridConfig(backbone_config=A_, image_size=3_84, num_labels=10_00 )
_lowerCamelCase : Optional[Any] = False
# load original model from timm
_lowerCamelCase : Any = timm.create_model(A_, pretrained=A_ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCamelCase : Optional[Any] = timm_model.state_dict()
if base_model:
remove_classification_head_(A_ )
_lowerCamelCase : int = create_rename_keys(A_, A_ )
for src, dest in rename_keys:
rename_key(A_, A_, A_ )
read_in_q_k_v(A_, A_, A_ )
_lowerCamelCase : Optional[Any] = '''huggingface/label-files'''
_lowerCamelCase : Tuple = '''imagenet-1k-id2label.json'''
_lowerCamelCase : Optional[int] = json.load(open(hf_hub_download(A_, A_, repo_type='''dataset''' ), '''r''' ) )
_lowerCamelCase : List[Any] = {int(A_ ): v for k, v in idalabel.items()}
_lowerCamelCase : Union[str, Any] = idalabel
_lowerCamelCase : Tuple = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
_lowerCamelCase : List[Any] = ViTHybridModel(A_ ).eval()
else:
_lowerCamelCase : Dict = ViTHybridForImageClassification(A_ ).eval()
model.load_state_dict(A_ )
# create image processor
_lowerCamelCase : Any = create_transform(**resolve_data_config({}, model=A_ ) )
_lowerCamelCase : str = transform.transforms
_lowerCamelCase : Union[str, Any] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
_lowerCamelCase : Any = ViTHybridImageProcessor(
do_resize=A_, size={'''shortest_edge''': timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=A_, crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]}, do_normalize=A_, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )
_lowerCamelCase : List[Any] = prepare_img()
_lowerCamelCase : int = transform(A_ ).unsqueeze(0 )
_lowerCamelCase : Any = processor(A_, return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(A_, A_ )
# verify logits
with torch.no_grad():
_lowerCamelCase : Tuple = model(A_ )
_lowerCamelCase : List[Any] = outputs.logits
print('''Predicted class:''', logits.argmax(-1 ).item() )
if base_model:
_lowerCamelCase : List[Any] = timm_model.forward_features(A_ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(A_, outputs.pooler_output, atol=1E-3 )
else:
_lowerCamelCase : str = timm_model(A_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(A_, outputs.logits, atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(A_ ).mkdir(exist_ok=A_ )
print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(A_ )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(A_ )
if push_to_hub:
print(F'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(F'''ybelkada/{vit_name}''' )
processor.push_to_hub(F'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_r50_s16_384''',
type=str,
help='''Name of the hybrid ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
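# Example invocation (the script filename and output path are placeholders):
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit_hybrid \
#       --push_to_hub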
| 598 | 0 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
A__ = logging.getLogger(__name__)
def _lowerCAmelCase ( __lowerCAmelCase=2 , __lowerCAmelCase=3 , __lowerCAmelCase=16 , __lowerCAmelCase = 10 , __lowerCAmelCase = 2 ) -> int:
"""simple docstring"""
def get_dataset(__lowerCAmelCase ):
snake_case__ : Dict = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(__lowerCAmelCase , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
snake_case__ : Optional[Any] = get_dataset(__lowerCAmelCase )
snake_case__ : Any = get_dataset(__lowerCAmelCase )
snake_case__ : Tuple = DataLoader(__lowerCAmelCase , shuffle=__lowerCAmelCase , batch_size=__lowerCAmelCase , num_workers=4 )
snake_case__ : Optional[int] = DataLoader(__lowerCAmelCase , shuffle=__lowerCAmelCase , batch_size=__lowerCAmelCase , num_workers=4 )
return (train_dataloader, valid_dataloader)
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ) -> int:
"""simple docstring"""
snake_case__ : Tuple = []
for epoch in range(__lowerCAmelCase ):
# Train quickly
model.train()
for batch in dataloader:
snake_case__ , snake_case__ : List[str] = batch
snake_case__ : Union[str, Any] = model(__lowerCAmelCase )
snake_case__ : List[str] = torch.nn.functional.mse_loss(__lowerCAmelCase , __lowerCAmelCase )
accelerator.backward(__lowerCAmelCase )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class a ( nn.Module ):
def __init__( self :Union[str, Any] ):
super().__init__()
snake_case__ : int = nn.Parameter(torch.randn(1 ) )
snake_case__ : Any = nn.Parameter(torch.randn(1 ) )
def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :str ):
return x * self.a + self.b
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :int ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
snake_case__ : Optional[int] = DummyModel()
snake_case__ : List[Any] = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
snake_case__ , snake_case__ : Optional[Any] = dummy_dataloaders()
snake_case__ : List[Any] = ProjectConfiguration(total_limit=1 ,project_dir=__lowercase ,automatic_checkpoint_naming=__lowercase )
# Train baseline
snake_case__ : Any = Accelerator(project_config=__lowercase )
snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[str] = accelerator.prepare(
__lowercase ,__lowercase ,__lowercase ,__lowercase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) ,1 )
def __lowerCamelCase ( self :List[Any] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
snake_case__ : Any = DummyModel()
snake_case__ : Union[str, Any] = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
snake_case__ , snake_case__ : List[Any] = dummy_dataloaders()
# Train baseline
snake_case__ : Optional[int] = Accelerator()
snake_case__ , snake_case__ , snake_case__ , snake_case__ : str = accelerator.prepare(
__lowercase ,__lowercase ,__lowercase ,__lowercase )
# Save initial
snake_case__ : Tuple = os.path.join(__lowercase ,'''initial''' )
accelerator.save_state(__lowercase )
((snake_case__) , (snake_case__)) : List[str] = model.a.item(), model.b.item()
snake_case__ : int = optimizer.state_dict()
snake_case__ : List[Any] = train(3 ,__lowercase ,__lowercase ,__lowercase ,__lowercase )
((snake_case__) , (snake_case__)) : Union[str, Any] = model.a.item(), model.b.item()
snake_case__ : Union[str, Any] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
snake_case__ : str = DummyModel()
snake_case__ : Union[str, Any] = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
snake_case__ , snake_case__ : Union[str, Any] = dummy_dataloaders()
snake_case__ : Tuple = Accelerator()
snake_case__ , snake_case__ , snake_case__ , snake_case__ : str = accelerator.prepare(
__lowercase ,__lowercase ,__lowercase ,__lowercase )
accelerator.load_state(__lowercase )
((snake_case__) , (snake_case__)) : Optional[Any] = model.a.item(), model.b.item()
snake_case__ : List[Any] = optimizer.state_dict()
self.assertEqual(__lowercase ,__lowercase )
self.assertEqual(__lowercase ,__lowercase )
self.assertEqual(__lowercase ,__lowercase )
snake_case__ : int = train(2 ,__lowercase ,__lowercase ,__lowercase ,__lowercase )
# Save everything
snake_case__ : Tuple = os.path.join(__lowercase ,'''checkpoint''' )
accelerator.save_state(__lowercase )
# Load everything back in and make sure all states work
accelerator.load_state(__lowercase )
test_rands += train(1 ,__lowercase ,__lowercase ,__lowercase ,__lowercase )
((snake_case__) , (snake_case__)) : Optional[int] = model.a.item(), model.b.item()
snake_case__ : Dict = optimizer.state_dict()
self.assertEqual(__lowercase ,__lowercase )
self.assertEqual(__lowercase ,__lowercase )
self.assertEqual(__lowercase ,__lowercase )
self.assertEqual(__lowercase ,__lowercase )
def __lowerCamelCase ( self :Tuple ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
snake_case__ : List[Any] = DummyModel()
snake_case__ : Optional[Any] = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
snake_case__ , snake_case__ : Tuple = dummy_dataloaders()
snake_case__ : Optional[int] = ProjectConfiguration(automatic_checkpoint_naming=__lowercase )
# Train baseline
snake_case__ : Optional[Any] = Accelerator(project_dir=__lowercase ,project_config=__lowercase )
snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[Any] = accelerator.prepare(
__lowercase ,__lowercase ,__lowercase ,__lowercase )
# Save initial
accelerator.save_state()
((snake_case__) , (snake_case__)) : List[Any] = model.a.item(), model.b.item()
snake_case__ : List[str] = optimizer.state_dict()
snake_case__ : Optional[int] = train(3 ,__lowercase ,__lowercase ,__lowercase ,__lowercase )
((snake_case__) , (snake_case__)) : Optional[Any] = model.a.item(), model.b.item()
snake_case__ : int = optimizer.state_dict()
# Train partially
set_seed(4_2 )
snake_case__ : Optional[int] = DummyModel()
snake_case__ : str = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
snake_case__ , snake_case__ : Any = dummy_dataloaders()
snake_case__ : Any = ProjectConfiguration(iteration=1 ,automatic_checkpoint_naming=__lowercase )
snake_case__ : Any = Accelerator(project_dir=__lowercase ,project_config=__lowercase )
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Dict = accelerator.prepare(
__lowercase ,__lowercase ,__lowercase ,__lowercase )
accelerator.load_state(os.path.join(__lowercase ,'''checkpoints''' ,'''checkpoint_0''' ) )
((snake_case__) , (snake_case__)) : List[str] = model.a.item(), model.b.item()
snake_case__ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(__lowercase ,__lowercase )
self.assertEqual(__lowercase ,__lowercase )
self.assertEqual(__lowercase ,__lowercase )
snake_case__ : Optional[int] = train(2 ,__lowercase ,__lowercase ,__lowercase ,__lowercase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__lowercase ,'''checkpoints''' ,'''checkpoint_1''' ) )
test_rands += train(1 ,__lowercase ,__lowercase ,__lowercase ,__lowercase )
((snake_case__) , (snake_case__)) : List[Any] = model.a.item(), model.b.item()
snake_case__ : Optional[Any] = optimizer.state_dict()
self.assertEqual(__lowercase ,__lowercase )
self.assertEqual(__lowercase ,__lowercase )
self.assertEqual(__lowercase ,__lowercase )
self.assertEqual(__lowercase ,__lowercase )
def __lowerCamelCase ( self :Tuple ):
snake_case__ : List[str] = torch.tensor([1, 2, 3] )
snake_case__ : int = torch.tensor([2, 3, 4] )
snake_case__ : Optional[int] = DummyModel()
snake_case__ : Optional[Any] = torch.optim.Adam(net.parameters() )
snake_case__ : Optional[Any] = Accelerator()
with self.assertRaises(__lowercase ) as ve:
accelerator.register_for_checkpointing(__lowercase ,__lowercase ,__lowercase ,__lowercase )
snake_case__ : Optional[Any] = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def __lowerCamelCase ( self :Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
snake_case__ : Tuple = DummyModel()
snake_case__ : List[str] = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
snake_case__ : Optional[Any] = torch.optim.lr_scheduler.StepLR(__lowercase ,step_size=1 ,gamma=0.99 )
snake_case__ , snake_case__ : Any = dummy_dataloaders()
snake_case__ : Dict = ProjectConfiguration(automatic_checkpoint_naming=__lowercase )
# Train baseline
snake_case__ : Any = Accelerator(project_dir=__lowercase ,project_config=__lowercase )
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[str] = accelerator.prepare(
__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase )
# Save initial
accelerator.save_state()
snake_case__ : str = scheduler.state_dict()
train(3 ,__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase )
self.assertNotEqual(__lowercase ,scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__lowercase ,'''checkpoints''' ,'''checkpoint_0''' ) )
self.assertEqual(__lowercase ,scheduler.state_dict() )
def __lowerCamelCase ( self :Dict ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
snake_case__ : Dict = DummyModel()
snake_case__ : List[Any] = ProjectConfiguration(automatic_checkpoint_naming=__lowercase ,total_limit=2 )
# Train baseline
snake_case__ : int = Accelerator(project_dir=__lowercase ,project_config=__lowercase )
snake_case__ : Optional[int] = accelerator.prepare(__lowercase )
            # Save 11 states; with total_limit=2 only the two most recent survive:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(__lowercase ,'''checkpoints''' ,'''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowercase ,'''checkpoints''' ,'''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowercase ,'''checkpoints''' ,'''checkpoint_10''' ) ) )
@require_cuda
def __lowerCamelCase ( self :Tuple ):
snake_case__ : int = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(__lowercase ,env=os.environ.copy() )
if __name__ == "__main__":
A__ = '''/tmp/accelerate/state_checkpointing'''
A__ = DummyModel()
A__ = torch.optim.Adam(params=model.parameters(), lr=1e-3)
A__ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
A__ , A__ = dummy_dataloaders()
A__ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
A__ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='''no''')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
A__ , A__ , A__ , A__ , A__ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
A__ , A__ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
A__ = group['''params'''][0].device
break
assert param_device.type == accelerator.device.type
A__ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''cpu''')
for group in optimizer.param_groups:
A__ = group['''params'''][0].device
break
assert (
param_device.type == torch.device('''cpu''').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''on_device''')
for group in optimizer.param_groups:
A__ = group['''params'''][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='''Unsupported optimizer map location passed'''):
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''invalid''')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
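# The save/load round-trip the tests above assert, in outline (a sketch, not part
# of the test suite; `savedir` is a placeholder):
#
#   config = ProjectConfiguration(automatic_checkpoint_naming=True)
#   accelerator = Accelerator(project_dir=savedir, project_config=config)
#   model, optimizer = accelerator.prepare(model, optimizer)
#   accelerator.save_state()   # writes savedir/checkpoints/checkpoint_0
#   ...train, mutating model and optimizer state...
#   accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"))
#   # model parameters and optimizer.state_dict() are restored exactly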
| 252 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=__lowerCamelCase )
class a ( __lowerCamelCase ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__lowerCAmelCase : str = field(default="""question-answering-extractive""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
__lowerCAmelCase : ClassVar[Features] = Features({"""question""": Value("""string""" ), """context""": Value("""string""" )} )
__lowerCAmelCase : ClassVar[Features] = Features(
{
"""answers""": Sequence(
{
"""text""": Value("""string""" ),
"""answer_start""": Value("""int32""" ),
} )
} )
__lowerCAmelCase : str = "question"
__lowerCAmelCase : str = "context"
__lowerCAmelCase : str = "answers"
@property
def __lowerCamelCase ( self :Any ):
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
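# Illustration: a dataset row compatible with the schema above (values are
# hypothetical; `answer_start` is the character offset of the answer in the
# context):
example = {
    "question": "Where is the Eiffel Tower?",
    "context": "The Eiffel Tower is in Paris.",
    "answers": {"text": ["Paris"], "answer_start": [23]},
}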
| 252 | 1 |
"""simple docstring"""
from collections import defaultdict
from math import gcd
def lowerCamelCase_ ( __lowerCAmelCase = 150_0000 ) -> int:
'''simple docstring'''
    lowerCamelCase__ =defaultdict(int )  # perimeter -> number of generating (m, n) pairs
lowerCamelCase__ =2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , __lowerCAmelCase , 2 ):
if gcd(__lowerCAmelCase , __lowerCAmelCase ) > 1:
continue
lowerCamelCase__ =2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(__lowerCAmelCase , limit + 1 , __lowerCAmelCase ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F'''{solution() = }''')
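# Background: Euclid's formula generates every primitive Pythagorean triple from
# integers m > n > 0 that are coprime and of opposite parity as
# (m^2 - n^2, 2*m*n, m^2 + n^2), whose perimeter is 2*m*(m + n); the solution
# above counts the perimeters reachable from exactly one triple, including
# non-primitive multiples. Sanity check for the smallest triple (m=2, n=1):
assert 2 * 2 * (2 + 1) == 3 + 4 + 5  # perimeter 12 from (3, 4, 5)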
| 132 |
"""simple docstring"""
import heapq
import sys
import numpy as np
a =tuple[int, int]
class __UpperCAmelCase :
def __init__( self ):
lowerCamelCase__ =[]
lowerCamelCase__ =set()
def _a ( self ):
if not self.empty():
return self.elements[0][0]
else:
return float("inf" )
def _a ( self ):
return len(self.elements ) == 0
def _a ( self , _lowerCamelCase , _lowerCamelCase ):
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(_lowerCamelCase )
else:
# update
# print("update", item)
lowerCamelCase__ =[]
((lowerCamelCase__) , (lowerCamelCase__)) =heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((lowerCamelCase__) , (lowerCamelCase__)) =heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def _a ( self , _lowerCamelCase ):
if item in self.set:
self.set.remove(_lowerCamelCase )
lowerCamelCase__ =[]
((lowerCamelCase__) , (lowerCamelCase__)) =heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((lowerCamelCase__) , (lowerCamelCase__)) =heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def _a ( self ):
return self.elements[0][1]
def _a ( self ):
((lowerCamelCase__) , (lowerCamelCase__)) =heapq.heappop(self.elements )
self.set.remove(_lowerCamelCase )
return (priority, item)
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ =np.array(__lowerCAmelCase )
lowerCamelCase__ =np.array(__lowerCAmelCase )
return np.linalg.norm(a - b )
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> str:
'''simple docstring'''
return consistent_heuristic(__lowerCAmelCase , __lowerCAmelCase ) // t
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase__ =g_function[start] + Wa * heuristics[i](__lowerCAmelCase , __lowerCAmelCase )
return ans
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ =np.chararray((n, n) )
for i in range(__lowerCAmelCase ):
for j in range(__lowerCAmelCase ):
lowerCamelCase__ ="*"
for i in range(__lowerCAmelCase ):
for j in range(__lowerCAmelCase ):
if (j, (n - 1) - i) in blocks:
lowerCamelCase__ ="#"
lowerCamelCase__ ="-"
lowerCamelCase__ =back_pointer[goal]
while x != start:
((lowerCamelCase__) , (lowerCamelCase__)) =x
# print(x)
lowerCamelCase__ ="-"
lowerCamelCase__ =back_pointer[x]
lowerCamelCase__ ="-"
for i in range(__lowerCAmelCase ):
for j in range(__lowerCAmelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=" " )
print("<-- End position" , end=" " )
else:
print(grid[i][j] , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
print("PATH TAKEN BY THE ALGORITHM IS:-" )
lowerCamelCase__ =back_pointer[goal]
while x != start:
print(__lowerCAmelCase , end=" " )
lowerCamelCase__ =back_pointer[x]
print(__lowerCAmelCase )
sys.exit()
def lowerCamelCase_ ( __lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ) -> List[str]:
'''simple docstring'''
for itera in range(__lowerCAmelCase ):
open_list[itera].remove_element(__lowerCAmelCase )
# print("s", s)
# print("j", j)
((lowerCamelCase__) , (lowerCamelCase__)) =s
lowerCamelCase__ =(x - 1, y)
lowerCamelCase__ =(x + 1, y)
lowerCamelCase__ =(x, y + 1)
lowerCamelCase__ =(x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(__lowerCAmelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(__lowerCAmelCase )
lowerCamelCase__ =-1
lowerCamelCase__ =float("inf" )
if valid(__lowerCAmelCase ) and g_function[neighbours] > g_function[s] + 1:
lowerCamelCase__ =g_function[s] + 1
lowerCamelCase__ =s
if neighbours not in close_list_anchor:
open_list[0].put(__lowerCAmelCase , key(__lowerCAmelCase , 0 , __lowerCAmelCase , __lowerCAmelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , __lowerCAmelCase ):
if key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) <= Wa * key(
__lowerCAmelCase , 0 , __lowerCAmelCase , __lowerCAmelCase ):
open_list[j].put(
__lowerCAmelCase , key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) )
def lowerCamelCase_ ( ) -> Any:
'''simple docstring'''
lowerCamelCase__ =[]
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
a ={0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
a =[
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
a =make_common_ground()
a =blocks_blk
# hyper parameters
a =1
a =1
a =20
a =3 # one consistent and two other inconsistent
# start and end destination
a =(0, 0)
a =(n - 1, n - 1)
a =1
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
'''simple docstring'''
lowerCamelCase__ ={start: 0, goal: float("inf" )}
lowerCamelCase__ ={start: -1, goal: -1}
lowerCamelCase__ =[]
lowerCamelCase__ =set()
for i in range(__lowerCAmelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(__lowerCAmelCase , key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) )
lowerCamelCase__ =[]
lowerCamelCase__ =[]
while open_list[0].minkey() < float("inf" ):
for i in range(1 , __lowerCAmelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("inf" ):
do_something(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else:
lowerCamelCase__ , lowerCamelCase__ =open_list[i].top_show()
visited.add(__lowerCAmelCase )
expand_state(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )
close_list_inad.append(__lowerCAmelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("inf" ):
do_something(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else:
lowerCamelCase__ =open_list[0].top_show()
visited.add(__lowerCAmelCase )
expand_state(
__lowerCAmelCase , 0 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )
close_list_anchor.append(__lowerCAmelCase )
print("No path found to goal" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(__lowerCAmelCase ):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
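# Background on the search above: every priority queue i orders states by
# key(s, i) = g(s) + W1 * h_i(s, goal); queue 0 uses the consistent heuristic,
# and an inadmissible queue i is expanded only while its best key stays within a
# factor W2 of the anchor queue's best key (both constants appear as the mangled
# name Wa in this source and are set to 1 here).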
| 132 | 1 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
_snake_case = logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_=None,snake_case_=None ):
# Recurse if needed
if "." in tensor_name:
_A : Optional[int] = tensor_name.split(""".""" )
for split in splits[:-1]:
_A : int = getattr(snake_case_,snake_case_ )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
_A : int = new_module
_A : str = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f'''{module} does not have a parameter or a buffer named {tensor_name}.''' )
_A : Any = tensor_name in module._buffers
_A : Optional[int] = getattr(snake_case_,snake_case_ )
if old_value.device == torch.device("""meta""" ) and device not in ["meta", torch.device("""meta""" )] and value is None:
raise ValueError(f'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' )
    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_4bit = False
        is_8bit = False
    else:
        is_4bit = hasattr(bnb.nn,"""Params4bit""" ) and isinstance(module._parameters[tensor_name],bnb.nn.Params4bit )
        is_8bit = isinstance(module._parameters[tensor_name],bnb.nn.Int8Params )
    if is_4bit or is_8bit:
_A : Dict = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
_A : Dict = old_value.to(snake_case_ )
elif isinstance(snake_case_,torch.Tensor ):
_A : Optional[Any] = value.to("""cpu""" )
                if value.dtype == torch.int8:
_A : str = version.parse(importlib.metadata.version("""bitsandbytes""" ) ) > version.parse(
"""0.37.2""" )
if not is_abit_serializable:
raise ValueError(
"""Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. """
"""Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.""" )
else:
_A : Optional[Any] = torch.tensor(snake_case_,device="""cpu""" )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls,snake_case_ ) and fp16_statistics is None:
_A : Union[str, Any] = new_value.T
_A : Optional[Any] = old_value.__dict__
            if is_8bit:
                _A : Optional[Any] = bnb.nn.Int8Params(snake_case_,requires_grad=snake_case_,**snake_case_ ).to(snake_case_ )
            elif is_4bit:
                _A : Optional[Any] = bnb.nn.Params4bit(snake_case_,requires_grad=snake_case_,**snake_case_ ).to(snake_case_ )
_A : Any = new_value
            if fp16_statistics is not None:
                setattr(module.weight,"""SCB""",fp16_statistics.to(snake_case_ ) )
else:
if value is None:
_A : Any = old_value.to(snake_case_ )
elif isinstance(snake_case_,torch.Tensor ):
_A : List[str] = value.to(snake_case_ )
else:
_A : Union[str, Any] = torch.tensor(snake_case_,device=snake_case_ )
if is_buffer:
_A : Optional[int] = new_value
else:
_A : Optional[int] = nn.Parameter(snake_case_,requires_grad=old_value.requires_grad )
_A : Optional[int] = new_value
def lowerCAmelCase_ ( snake_case_,snake_case_=None,snake_case_=None,snake_case_=None,snake_case_=False ):
for name, module in model.named_children():
if current_key_name is None:
_A : Union[str, Any] = []
current_key_name.append(snake_case_ )
if (isinstance(snake_case_,nn.Linear ) or isinstance(snake_case_,snake_case_ )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in """.""".join(snake_case_ ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(snake_case_,snake_case_ ):
                    _A , _A = module.weight.shape
else:
_A : List[Any] = module.in_features
_A : Optional[int] = module.out_features
if quantization_config.quantization_method() == "llm_int8":
                    _A : Optional[int] = bnb.nn.Linear8bitLt(
                        snake_case_,snake_case_,module.bias is not None,has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,threshold=quantization_config.llm_int8_threshold,)
_A : Any = True
else:
if (
                        quantization_config.llm_int8_skip_modules is not None
                        and name in quantization_config.llm_int8_skip_modules
):
pass
else:
                        _A : Any = bnb.nn.Linear4bit(
                            snake_case_,snake_case_,module.bias is not None,quantization_config.bnb_4bit_compute_dtype,compress_statistics=quantization_config.bnb_4bit_use_double_quant,quant_type=quantization_config.bnb_4bit_quant_type,)
_A : Dict = True
# Store the module class in case we need to transpose the weight later
_A : List[str] = type(snake_case_ )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(snake_case_ )
if len(list(module.children() ) ) > 0:
        _A , _A = _replace_with_bnb_linear(
snake_case_,snake_case_,snake_case_,snake_case_,has_been_replaced=snake_case_,)
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def lowerCAmelCase_ ( snake_case_,snake_case_=None,snake_case_=None,snake_case_=None ):
_A : Optional[int] = ["""lm_head"""] if modules_to_not_convert is None else modules_to_not_convert
    _A , _A = _replace_with_bnb_linear(
snake_case_,snake_case_,snake_case_,snake_case_ )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def lowerCAmelCase_ ( *snake_case_,**snake_case_ ):
warnings.warn(
"""`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead""",snake_case_,)
return replace_with_bnb_linear(*snake_case_,**snake_case_ )
def lowerCAmelCase_ ( *snake_case_,**snake_case_ ):
warnings.warn(
"""`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead""",snake_case_,)
return set_module_quantized_tensor_to_device(*snake_case_,**snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
_A : List[Any] = deepcopy(snake_case_ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
_A : List[Any] = find_tied_parameters(snake_case_ )
# For compatibility with Accelerate < 0.18
if isinstance(snake_case_,snake_case_ ):
_A : List[Any] = sum(list(tied_params.values() ),[] ) + list(tied_params.keys() )
else:
_A : List[str] = sum(snake_case_,[] )
_A : Any = len(snake_case_ ) > 0
# Check if it is a base model
_A : int = not hasattr(snake_case_,model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
_A : Tuple = list(model.named_children() )
_A : int = [list_modules[-1][0]]
# add last module together with tied weights
_A : Any = set(snake_case_ ) - set(snake_case_ )
_A : Any = list(set(snake_case_ ) ) + list(snake_case_ )
# remove ".weight" from the keys
_A : Tuple = [""".weight""", """.bias"""]
_A : Any = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
_A : Optional[int] = name.replace(snake_case_,"""""" )
filtered_module_names.append(snake_case_ )
return filtered_module_names
| 307 |
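Editor's note: the tensor-assignment helper at the top of the sample above first resolves a dotted tensor name down the module tree before swapping in the quantized value. A self-contained sketch of that traversal (no bitsandbytes needed; names are illustrative):

import torch.nn as nn

def resolve_submodule(module, dotted_name):
    # "0.weight" -> (the module that owns "weight", "weight")
    *parents, leaf = dotted_name.split(".")
    for part in parents:
        module = getattr(module, part)
    return module, leaf

model = nn.Sequential(nn.Linear(4, 4))
parent, leaf = resolve_submodule(model, "0.weight")
print(type(parent).__name__, leaf)  # Linear weight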
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester ( unittest.TestCase ):
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=4 , ) -> Optional[int]:
_A : List[Any] = parent
_A : List[Any] = batch_size
_A : Dict = seq_length
_A : Optional[Any] = is_training
_A : int = use_attention_mask
_A : int = use_token_type_ids
_A : List[Any] = use_labels
_A : List[str] = vocab_size
_A : List[Any] = hidden_size
_A : str = num_hidden_layers
_A : Optional[Any] = num_attention_heads
_A : List[Any] = intermediate_size
_A : Any = hidden_act
_A : int = hidden_dropout_prob
_A : int = attention_probs_dropout_prob
_A : List[str] = max_position_embeddings
_A : Optional[int] = type_vocab_size
_A : List[str] = type_sequence_label_size
_A : Dict = initializer_range
_A : List[Any] = num_choices
def a__ ( self ) -> int:
_A : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A : Optional[Any] = None
if self.use_attention_mask:
_A : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_A : Optional[int] = None
if self.use_token_type_ids:
_A : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A : Optional[int] = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def a__ ( self ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def a__ ( self ) -> int:
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxRobertaModelTest ( FlaxModelTesterMixin,unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def a__ ( self ) -> List[Any]:
_A : Optional[Any] = FlaxRobertaModelTester(self )
@slow
def a__ ( self ) -> Optional[int]:
for model_class_name in self.all_model_classes:
_A : Optional[int] = model_class_name.from_pretrained("""roberta-base""" , from_pt=_a )
_A : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_a )
| 307 | 1 |
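Editor's note: the tester above relies on `ids_tensor` and `random_attention_mask` from the shared test utilities. Rough numpy stand-ins for orientation (the real helpers may differ in detail):

import numpy as np

rng = np.random.default_rng(0)

def ids_tensor(shape, vocab_size):
    # random integer token ids in [0, vocab_size)
    return rng.integers(0, vocab_size, size=shape, dtype=np.int64)

def random_attention_mask(shape):
    mask = rng.integers(0, 2, size=shape, dtype=np.int64)
    mask[:, -1] = 1  # assumed: guarantee at least one attended position per row
    return mask

print(ids_tensor((2, 5), vocab_size=99).shape)  # (2, 5)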
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    '''simple docstring'''
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    '''simple docstring'''
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
def invert_modulo(a: int, n: int) -> int:
    '''simple docstring'''
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    '''simple docstring'''
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
| 706 |
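Editor's note: a quick brute-force cross-check of what `chinese_remainder_theorem(5, 1, 7, 2)` above should return:

# Smallest n with n % 5 == 1 and n % 7 == 2.
n = next(k for k in range(5 * 7) if k % 5 == 1 and k % 7 == 2)
print(n)  # 16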
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
def UpperCamelCase ( lowercase_ : Any , lowercase_ : List[Any] , lowercase_ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return [
int(1_0_0_0 * (box[0] / width) ),
int(1_0_0_0 * (box[1] / height) ),
int(1_0_0_0 * (box[2] / width) ),
int(1_0_0_0 * (box[3] / height) ),
]
def UpperCamelCase ( lowercase_ : np.ndarray , lowercase_ : Optional[str] , lowercase_ : Optional[str] = None ) -> Tuple:
'''simple docstring'''
lowercase =tesseract_config if tesseract_config is not None else ''''''
# apply OCR
lowercase =to_pil_image(lowercase_ )
lowercase , lowercase =pil_image.size
lowercase =pytesseract.image_to_data(lowercase_ , lang=lowercase_ , output_type='''dict''' , config=lowercase_ )
lowercase , lowercase , lowercase , lowercase , lowercase =data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
lowercase =[idx for idx, word in enumerate(lowercase_ ) if not word.strip()]
lowercase =[word for idx, word in enumerate(lowercase_ ) if idx not in irrelevant_indices]
lowercase =[coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
lowercase =[coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
lowercase =[coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
lowercase =[coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
lowercase =[]
for x, y, w, h in zip(lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
lowercase =[x, y, x + w, y + h]
actual_boxes.append(lowercase_ )
# finally, normalize the bounding boxes
lowercase =[]
for box in actual_boxes:
normalized_boxes.append(normalize_box(lowercase_ , lowercase_ , lowercase_ ) )
assert len(lowercase_ ) == len(lowercase_ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class LayoutLMv2ImageProcessor ( BaseImageProcessor ):
    model_input_names = ['pixel_values']
def __init__( self , snake_case_ = True , snake_case_ = None , snake_case_ = PILImageResampling.BILINEAR , snake_case_ = True , snake_case_ = None , snake_case_ = "" , **snake_case_ , ):
super().__init__(**snake_case_ )
lowercase =size if size is not None else {'''height''': 2_24, '''width''': 2_24}
lowercase =get_size_dict(snake_case_ )
lowercase =do_resize
lowercase =size
lowercase =resample
lowercase =apply_ocr
lowercase =ocr_lang
lowercase =tesseract_config
def _A( self , snake_case_ , snake_case_ , snake_case_ = PILImageResampling.BILINEAR , snake_case_ = None , **snake_case_ , ):
lowercase =get_size_dict(snake_case_ )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
lowercase =(size['''height'''], size['''width'''])
return resize(snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )
def _A( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = ChannelDimension.FIRST , **snake_case_ , ):
lowercase =do_resize if do_resize is not None else self.do_resize
lowercase =size if size is not None else self.size
lowercase =get_size_dict(snake_case_ )
lowercase =resample if resample is not None else self.resample
lowercase =apply_ocr if apply_ocr is not None else self.apply_ocr
lowercase =ocr_lang if ocr_lang is not None else self.ocr_lang
lowercase =tesseract_config if tesseract_config is not None else self.tesseract_config
lowercase =make_list_of_images(snake_case_ )
if not valid_images(snake_case_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
# All transformations expect numpy arrays.
lowercase =[to_numpy_array(snake_case_ ) for image in images]
if apply_ocr:
requires_backends(self , '''pytesseract''' )
lowercase =[]
lowercase =[]
for image in images:
lowercase , lowercase =apply_tesseract(snake_case_ , snake_case_ , snake_case_ )
words_batch.append(snake_case_ )
boxes_batch.append(snake_case_ )
if do_resize:
lowercase =[self.resize(image=snake_case_ , size=snake_case_ , resample=snake_case_ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
lowercase =[flip_channel_order(snake_case_ ) for image in images]
lowercase =[to_channel_dimension_format(snake_case_ , snake_case_ ) for image in images]
lowercase =BatchFeature(data={'''pixel_values''': images} , tensor_type=snake_case_ )
if apply_ocr:
lowercase =words_batch
lowercase =boxes_batch
return data
| 145 | 0 |
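Editor's note: the box normalization at the top of the sample above maps pixel boxes onto a 0-1000 grid. A worked example with a readable stand-in name:

def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]

print(normalize_box([20, 10, 120, 60], width=200, height=100))
# [100, 100, 600, 600]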
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCAmelCase : List[Any] = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class DebertaV2Config ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''deberta-v2'''
def __init__( self , _lowercase=1_2_8_1_0_0 , _lowercase=1_5_3_6 , _lowercase=2_4 , _lowercase=2_4 , _lowercase=6_1_4_4 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=0 , _lowercase=0.02 , _lowercase=1E-7 , _lowercase=False , _lowercase=-1 , _lowercase=0 , _lowercase=True , _lowercase=None , _lowercase=0 , _lowercase="gelu" , **_lowercase , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**_lowercase )
snake_case_ : Dict = hidden_size
snake_case_ : Tuple = num_hidden_layers
snake_case_ : Optional[Any] = num_attention_heads
snake_case_ : Tuple = intermediate_size
snake_case_ : Optional[int] = hidden_act
snake_case_ : int = hidden_dropout_prob
snake_case_ : str = attention_probs_dropout_prob
snake_case_ : int = max_position_embeddings
snake_case_ : int = type_vocab_size
snake_case_ : Dict = initializer_range
snake_case_ : str = relative_attention
snake_case_ : Dict = max_relative_positions
snake_case_ : List[Any] = pad_token_id
snake_case_ : List[Any] = position_biased_input
# Backwards compatibility
if type(_lowercase ) == str:
snake_case_ : Any = [x.strip() for x in pos_att_type.lower().split("""|""" )]
snake_case_ : List[str] = pos_att_type
snake_case_ : Union[str, Any] = vocab_size
snake_case_ : Dict = layer_norm_eps
snake_case_ : Dict = kwargs.get("""pooler_hidden_size""" , _lowercase )
snake_case_ : Union[str, Any] = pooler_dropout
snake_case_ : List[str] = pooler_hidden_act
class DebertaV2OnnxConfig ( OnnxConfig ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
snake_case_ : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case_ : Any = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return 1_2
def UpperCAmelCase__ ( self , _lowercase , _lowercase = -1 , _lowercase = -1 , _lowercase = -1 , _lowercase = False , _lowercase = None , _lowercase = 3 , _lowercase = 4_0 , _lowercase = 4_0 , _lowercase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = super().generate_dummy_inputs(preprocessor=_lowercase , framework=_lowercase )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
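Editor's note: the backwards-compatibility branch in the config above parses a pipe-separated `pos_att_type` string into a list; a standalone illustration:

pos_att_type = "c2p|p2c"
print([x.strip() for x in pos_att_type.lower().split("|")])  # ['c2p', 'p2c']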
| 58 |
import darl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
_UpperCAmelCase = {
"""n_samples""": 64,
"""horizon""": 32,
"""num_inference_steps""": 20,
"""n_guide_steps""": 2, # can set to 0 for faster sampling, does not use value network
"""scale_grad_by_std""": True,
"""scale""": 0.1,
"""eta""": 0.0,
"""t_grad_cutoff""": 2,
"""device""": """cpu""",
}
if __name__ == "__main__":
_UpperCAmelCase = """hopper-medium-v2"""
_UpperCAmelCase = gym.make(env_name)
_UpperCAmelCase = ValueGuidedRLPipeline.from_pretrained(
"""bglick13/hopper-medium-v2-value-function-hor32""",
env=env,
)
env.seed(0)
_UpperCAmelCase = env.reset()
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 1000
_UpperCAmelCase = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
_UpperCAmelCase = pipeline(obs, planning_horizon=32)
# execute action in environment
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = env.step(denorm_actions)
_UpperCAmelCase = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F"""Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"""
F""" {total_score}"""
)
# save observations for rendering
rollout.append(next_observation.copy())
_UpperCAmelCase = next_observation
except KeyboardInterrupt:
pass
print(F"""Total reward: {total_reward}""")
| 558 | 0 |
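Editor's note: the script above is a standard rollout loop around `env.step`. A minimal skeleton with a stub environment so it runs without gym/d4rl/diffusers; the stub's interface mirrors the classic 4-tuple gym API used in the sample.

class StubEnv:
    def reset(self):
        self.t = 0
        return [0.0]
    def step(self, action):
        self.t += 1
        obs, reward, done = [float(self.t)], 1.0, self.t >= 5
        return obs, reward, done, {}

env, total_reward = StubEnv(), 0.0
obs = env.reset()
while True:
    obs, reward, done, info = env.step(action=0)  # stand-in for pipeline(obs, ...)
    total_reward += reward
    if done:
        break
print("total reward:", total_reward)  # 5.0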
from math import isqrt, loga
def __UpperCamelCase ( _lowerCAmelCase ) -> list[int]:
"""simple docstring"""
A : Tuple = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , _lowerCAmelCase , _lowerCAmelCase ):
A : Union[str, Any] = False
return [i for i in range(2 , _lowerCAmelCase ) if is_prime[i]]
def __UpperCamelCase ( _lowerCAmelCase = 80_0800 , _lowerCAmelCase = 80_0800 ) -> int:
"""simple docstring"""
A : List[str] = degree * loga(_lowerCAmelCase )
A : Union[str, Any] = int(_lowerCAmelCase )
A : Dict = calculate_prime_numbers(_lowerCAmelCase )
A : Dict = 0
A : Optional[int] = 0
A : List[str] = len(_lowerCAmelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 520 |
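Editor's note: the solution above compares logarithms instead of the astronomically large products p**q * q**p. One pair checked explicitly under the same bound:

from math import log2

p, q = 2, 3
bound = 800_800 * log2(800_800)    # log2(800800**800800)
value = q * log2(p) + p * log2(q)  # log2(2**3 * 3**2) = log2(72)
print(value <= bound)  # True: 72 = 2**3 * 3**2 is a hybrid integer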
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE_:List[str] = logging.get_logger(__name__)
class ClapFeatureExtractor ( SequenceFeatureExtractor ):
    '''simple docstring'''
    model_input_names = ["input_features", "is_longer"]
def __init__( self, lowerCamelCase__=64, lowerCamelCase__=4_8000, lowerCamelCase__=480, lowerCamelCase__=10, lowerCamelCase__=1024, lowerCamelCase__=0.0, lowerCamelCase__=False, lowerCamelCase__ = 0, lowerCamelCase__ = 1_4000, lowerCamelCase__ = None, lowerCamelCase__ = "fusion", lowerCamelCase__ = "repeatpad", **lowerCamelCase__, ):
super().__init__(
feature_size=lowerCamelCase__, sampling_rate=lowerCamelCase__, padding_value=lowerCamelCase__, return_attention_mask=lowerCamelCase__, **lowerCamelCase__, )
A : Dict = top_db
A : Tuple = truncation
A : Union[str, Any] = padding
A : Optional[int] = fft_window_size
A : Optional[int] = (fft_window_size >> 1) + 1
A : Optional[int] = hop_length
A : List[Any] = max_length_s
A : List[str] = max_length_s * sampling_rate
A : List[str] = sampling_rate
A : Optional[int] = frequency_min
A : int = frequency_max
A : Any = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase__, min_frequency=lowerCamelCase__, max_frequency=lowerCamelCase__, sampling_rate=lowerCamelCase__, norm=lowerCamelCase__, mel_scale="""htk""", )
A : Union[str, Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase__, min_frequency=lowerCamelCase__, max_frequency=lowerCamelCase__, sampling_rate=lowerCamelCase__, norm="""slaney""", mel_scale="""slaney""", )
def _lowerCAmelCase ( self ):
A : Optional[Any] = copy.deepcopy(self.__dict__ )
A : Tuple = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : List[str] = spectrogram(
lowerCamelCase__, window_function(self.fft_window_size, """hann""" ), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=lowerCamelCase__, log_mel="""dB""", )
return log_mel_spectrogram.T
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : Union[str, Any] = np.array_split(list(range(0, total_frames - chunk_frames + 1 ) ), 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
A : Dict = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
A : Union[str, Any] = [0]
# randomly choose index for each part
A : str = np.random.choice(ranges[0] )
A : Optional[Any] = np.random.choice(ranges[1] )
A : int = np.random.choice(ranges[2] )
A : int = mel[idx_front : idx_front + chunk_frames, :]
A : Tuple = mel[idx_middle : idx_middle + chunk_frames, :]
A : Union[str, Any] = mel[idx_back : idx_back + chunk_frames, :]
A : Tuple = torch.tensor(mel[None, None, :] )
A : Any = torch.nn.functional.interpolate(
lowerCamelCase__, size=[chunk_frames, 64], mode="""bilinear""", align_corners=lowerCamelCase__ )
A : List[str] = mel_shrink[0][0].numpy()
A : Any = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0 )
return mel_fusion
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
A : Tuple = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
A : Union[str, Any] = len(lowerCamelCase__ ) - max_length
A : Dict = np.random.randint(0, overflow + 1 )
A : Union[str, Any] = waveform[idx : idx + max_length]
A : List[str] = self._np_extract_fbank_features(lowerCamelCase__, self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
A : Tuple = self._np_extract_fbank_features(lowerCamelCase__, self.mel_filters )
A : Optional[int] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
A : Optional[int] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
A : Any = np.stack([mel, mel, mel, mel], axis=0 )
A : Optional[Any] = False
else:
A : Tuple = self._random_mel_fusion(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
A : List[Any] = True
else:
raise NotImplementedError(f'''data_truncating {truncation} not implemented''' )
else:
A : str = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
A : List[Any] = int(max_length / len(lowerCamelCase__ ) )
A : List[str] = np.stack(np.tile(lowerCamelCase__, n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
A : List[Any] = int(max_length / len(lowerCamelCase__ ) )
A : List[str] = np.stack(np.tile(lowerCamelCase__, lowerCamelCase__ ) )
A : Any = np.pad(lowerCamelCase__, (0, max_length - waveform.shape[0]), mode="""constant""", constant_values=0 )
if truncation == "fusion":
A : str = self._np_extract_fbank_features(lowerCamelCase__, self.mel_filters )
A : Optional[Any] = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0 )
else:
A : Optional[int] = self._np_extract_fbank_features(lowerCamelCase__, self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, **lowerCamelCase__, ):
A : Any = truncation if truncation is not None else self.truncation
A : str = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
A : Any = isinstance(lowerCamelCase__, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
A : Optional[Any] = is_batched_numpy or (
isinstance(lowerCamelCase__, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
            A : Tuple = [np.asarray(lowerCamelCase__, dtype=np.float64 ) for speech in raw_speech]
        elif not is_batched and not isinstance(lowerCamelCase__, np.ndarray ):
            A : str = np.asarray(lowerCamelCase__, dtype=np.float64 )
        elif isinstance(lowerCamelCase__, np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            A : int = raw_speech.astype(np.float32 )
# always return batch
if not is_batched:
A : List[str] = [np.asarray(lowerCamelCase__ )]
# convert to mel spectrogram, truncate and pad if needed.
A : int = [
self._get_input_mel(lowerCamelCase__, max_length if max_length else self.nb_max_samples, lowerCamelCase__, lowerCamelCase__ )
for waveform in raw_speech
]
A : Optional[Any] = []
A : Optional[int] = []
for mel, longer in padded_inputs:
input_mel.append(lowerCamelCase__ )
is_longer.append(lowerCamelCase__ )
if truncation == "fusion" and sum(lowerCamelCase__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
A : Optional[Any] = np.random.randint(0, len(lowerCamelCase__ ) )
A : Union[str, Any] = True
if isinstance(input_mel[0], lowerCamelCase__ ):
            A : List[Any] = [np.asarray(lowerCamelCase__, dtype=np.float64 ) for feature in input_mel]
# is_longer is a list of bool
A : Optional[Any] = [[longer] for longer in is_longer]
A : Tuple = {"""input_features""": input_mel, """is_longer""": is_longer}
A : Any = BatchFeature(lowerCamelCase__ )
if return_tensors is not None:
A : Dict = input_features.convert_to_tensors(lowerCamelCase__ )
return input_features
| 520 | 1 |
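Editor's note: the "repeatpad" branch above tiles a too-short waveform and zero-pads the remainder. A standalone numpy sketch of that behavior (a simplification of the sample's exact code):

import numpy as np

def repeatpad(waveform, max_length):
    n_repeat = int(max_length / len(waveform))
    waveform = np.tile(waveform, n_repeat)
    return np.pad(waveform, (0, max_length - waveform.shape[0]),
                  mode="constant", constant_values=0)

print(repeatpad(np.array([1.0, 2.0, 3.0]), 8))
# [1. 2. 3. 1. 2. 3. 0. 0.]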
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any]=2 , lowercase_ : Dict=56 , lowercase_ : str=True , lowercase_ : Optional[Any]=True , lowercase_ : List[Any]=True , lowercase_ : Union[str, Any]=True , lowercase_ : Any=99 , lowercase_ : Optional[int]=32 , lowercase_ : Tuple=2 , lowercase_ : int=2 , lowercase_ : List[str]=7 , lowercase_ : Any="gelu_new" , lowercase_ : List[str]=0.1 , lowercase_ : str=0.1 , lowercase_ : List[Any]=512 , lowercase_ : List[str]=16 , lowercase_ : Optional[int]=2 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : Union[str, Any]=4 , lowercase_ : Union[str, Any]="block_sparse" , lowercase_ : Tuple=True , lowercase_ : Dict=False , lowercase_ : Dict=2 , lowercase_ : Dict=3 , ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_attention_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_choices
_UpperCamelCase = rescale_embeddings
_UpperCamelCase = attention_type
_UpperCamelCase = use_bias
_UpperCamelCase = block_size
_UpperCamelCase = num_random_blocks
def __UpperCAmelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = None
if self.use_attention_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCAmelCase ( self : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest ( FlaxModelTesterMixin, unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
    test_attn_probs = False
    test_mismatched_shapes = False
def __UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = FlaxBigBirdModelTester(self)
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __UpperCAmelCase ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __UpperCAmelCase ( self : Any) -> int:
"""simple docstring"""
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __UpperCAmelCase ( self : List[Any]) -> Union[str, Any]:
"""simple docstring"""
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __UpperCAmelCase ( self : str) -> Union[str, Any]:
"""simple docstring"""
super().test_hidden_states_output()
@slow
def __UpperCAmelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
_UpperCamelCase = model_class_name.from_pretrained("google/bigbird-roberta-base")
self.assertIsNotNone(lowercase_)
def __UpperCAmelCase ( self : Tuple) -> str:
"""simple docstring"""
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __UpperCAmelCase ( self : Tuple) -> Dict:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_UpperCamelCase = self._prepare_for_class(lowercase_ , lowercase_)
_UpperCamelCase = model_class(lowercase_)
@jax.jit
def model_jitted(lowercase_ : Dict , lowercase_ : List[Any]=None , **lowercase_ : Tuple):
return model(input_ids=lowercase_ , attention_mask=lowercase_ , **lowercase_)
with self.subTest("JIT Enabled"):
_UpperCamelCase = model_jitted(**lowercase_).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
_UpperCamelCase = model_jitted(**lowercase_).to_tuple()
self.assertEqual(len(lowercase_) , len(lowercase_))
for jitted_output, output in zip(lowercase_ , lowercase_):
self.assertEqual(jitted_output.shape , output.shape)
def __UpperCAmelCase ( self : Any , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : str=1e-5 , lowercase_ : int="outputs" , lowercase_ : List[str]=None) -> Tuple:
"""simple docstring"""
if name.startswith("outputs.attentions"):
return
else:
super().check_pt_flax_outputs(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_)
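Editor's note: the JIT test above compares jitted and eager outputs of the model; the same check on a toy function needs only jax:

import jax
import jax.numpy as jnp

def f(x):
    return jnp.tanh(x) * 2.0

jitted = jax.jit(f)
x = jnp.arange(4.0)
with jax.disable_jit():
    eager_out = f(x)
print(jnp.allclose(jitted(x), eager_out))  # True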
| 547 |
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted :
'''simple docstring'''
def __init__( self : List[str]) -> Any:
"""simple docstring"""
_UpperCamelCase = {}
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : str) -> None:
"""simple docstring"""
_UpperCamelCase = {}
def __UpperCAmelCase ( self : List[str] , lowercase_ : str , lowercase_ : str , lowercase_ : float) -> None:
"""simple docstring"""
if nodea not in self.connections:
self.add_node(lowercase_)
if nodea not in self.connections:
self.add_node(lowercase_)
_UpperCamelCase = probability
def __UpperCAmelCase ( self : Any) -> list[str]:
"""simple docstring"""
return list(self.connections)
def __UpperCAmelCase ( self : Tuple , lowercase_ : str) -> str:
"""simple docstring"""
_UpperCamelCase = 0
_UpperCamelCase = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->dict[str, int]:
'''simple docstring'''
_UpperCamelCase = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(a__ , a__ , a__ )
_UpperCamelCase = Counter(graph.get_nodes() )
_UpperCamelCase = start
for _ in range(a__ ):
_UpperCamelCase = graph.transition(a__ )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
| 547 | 1 |
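Editor's note: a self-contained random walk over the same kind of transition table as the sample above, assuming each node's outgoing probabilities sum to 1:

from collections import Counter
from random import random

connections = {"a": {"a": 0.9, "b": 0.1}, "b": {"a": 0.5, "b": 0.5}}

def transition(node):
    acc, r = 0.0, random()
    for dest, p in connections[node].items():
        acc += p
        if acc > r:
            return dest
    return node

visited, node = Counter(), "a"
for _ in range(1000):
    node = transition(node)
    visited[node] += 1
print(visited)  # roughly 5:1 in favour of "a" (the stationary distribution)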
'''simple docstring'''
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies
def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6)
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total])
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 697 |
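Editor's note: the frequency table built with itertools.product above can be sanity-checked by direct enumeration:

from collections import Counter
from itertools import product

totals = Counter(sum(roll) for roll in product(range(1, 5), repeat=2))
print(totals[5])  # 4 ways to roll a total of 5 with two 4-sided dice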
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset ( Dataset ):
def __init__( self , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Any = params
__a : Optional[Any] = np.array(__UpperCamelCase )
__a : Union[str, Any] = np.array([len(__UpperCamelCase ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , __UpperCamelCase ):
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
'''simple docstring'''
return len(self.lengths )
def __lowerCamelCase ( self ):
'''simple docstring'''
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Tuple = self.params.max_model_input_size
__a : Union[str, Any] = self.lengths > max_len
logger.info(f"""Splitting {sum(__UpperCamelCase )} too long sequences.""" )
def divide_chunks(__UpperCamelCase , __UpperCamelCase ):
return [l[i : i + n] for i in range(0 , len(__UpperCamelCase ) , __UpperCamelCase )]
__a : int = []
__a : Union[str, Any] = []
if self.params.mlm:
__a , __a : Any = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
__a , __a : str = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
__a : Any = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
__a : int = np.insert(__UpperCamelCase , 0 , __UpperCamelCase )
if sub_s[-1] != sep_id:
__a : str = np.insert(__UpperCamelCase , len(__UpperCamelCase ) , __UpperCamelCase )
assert len(__UpperCamelCase ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(__UpperCamelCase )
new_tok_ids.extend(__UpperCamelCase )
new_lengths.extend([len(__UpperCamelCase ) for l in sub_seqs] )
__a : Dict = np.array(__UpperCamelCase )
__a : Tuple = np.array(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = len(self )
__a : List[str] = self.lengths > 11
__a : int = self.token_ids[indices]
__a : Union[str, Any] = self.lengths[indices]
__a : Any = len(self )
logger.info(f"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
__a : List[str] = self.params.special_tok_ids["""unk_token"""]
__a : str = len(self )
__a : str = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
__a : Optional[Any] = (unk_occs / self.lengths) < 0.5
__a : List[str] = self.token_ids[indices]
__a : Optional[int] = self.lengths[indices]
__a : Any = len(self )
logger.info(f"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
if not self.params.is_master:
return
logger.info(f"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def __lowerCamelCase ( self , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = [t[0] for t in batch]
__a : str = [t[1] for t in batch]
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
# Max for paddings
__a : Optional[int] = max(__UpperCamelCase )
# Pad token ids
if self.params.mlm:
__a : int = self.params.special_tok_ids["""pad_token"""]
else:
__a : Tuple = self.params.special_tok_ids["""unk_token"""]
__a : Any = [list(t.astype(__UpperCamelCase ) ) + [pad_idx] * (max_seq_len_ - len(__UpperCamelCase )) for t in token_ids]
assert len(tk_ ) == len(__UpperCamelCase )
assert all(len(__UpperCamelCase ) == max_seq_len_ for t in tk_ )
__a : Any = torch.tensor(tk_ ) # (bs, max_seq_len_)
__a : Optional[Any] = torch.tensor(__UpperCamelCase ) # (bs)
        return tk_t, lg_t
| 697 | 1 |
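Editor's note: the collate method at the end of the dataset class above pads variable-length id lists into a (batch, max_len) tensor. A minimal stand-alone version, assuming a pad id of 0:

import torch

def pad_batch(sequences, pad_idx=0):
    max_len = max(len(s) for s in sequences)
    padded = [list(s) + [pad_idx] * (max_len - len(s)) for s in sequences]
    return torch.tensor(padded), torch.tensor([len(s) for s in sequences])

tokens, lengths = pad_batch([[5, 6, 7], [8, 9]])
print(tokens.shape, lengths.tolist())  # torch.Size([2, 3]) [3, 2]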
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    '''simple docstring'''
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__UpperCamelCase: str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--base_model""", action="""store_true""", help="""Whether you want just the base model (no decoder) or not."""
)
__UpperCamelCase: List[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 266 |
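Editor's note: the conversion script above ends by serializing a state_dict with torch.save; the same round-trip on a toy module, kept in memory so no files are written:

import io
import torch
import torch.nn as nn

model = nn.Linear(4, 2)
buffer = io.BytesIO()
torch.save(model.state_dict(), buffer)
buffer.seek(0)
model.load_state_dict(torch.load(buffer))
print(sorted(model.state_dict().keys()))  # ['bias', 'weight']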
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase: List[Any] = logging.get_logger(__name__)
__UpperCamelCase: Dict = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class LukeConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = "luke"
def __init__( self: Union[str, Any], lowerCamelCase_: str=50267, lowerCamelCase_: Dict=500000, lowerCamelCase_: Union[str, Any]=768, lowerCamelCase_: Dict=256, lowerCamelCase_: int=12, lowerCamelCase_: Union[str, Any]=12, lowerCamelCase_: str=3072, lowerCamelCase_: List[Any]="gelu", lowerCamelCase_: int=0.1, lowerCamelCase_: Optional[Any]=0.1, lowerCamelCase_: Any=512, lowerCamelCase_: List[str]=2, lowerCamelCase_: str=0.0_2, lowerCamelCase_: Union[str, Any]=1E-12, lowerCamelCase_: List[Any]=True, lowerCamelCase_: List[Any]=None, lowerCamelCase_: Optional[Any]=1, lowerCamelCase_: Dict=0, lowerCamelCase_: List[Any]=2, **lowerCamelCase_: Optional[Any], ):
super().__init__(pad_token_id=lowerCamelCase_, bos_token_id=lowerCamelCase_, eos_token_id=lowerCamelCase_, **lowerCamelCase_ )
lowercase__ : Tuple = vocab_size
lowercase__ : Any = entity_vocab_size
lowercase__ : Tuple = hidden_size
lowercase__ : List[str] = entity_emb_size
lowercase__ : Tuple = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : Dict = hidden_act
lowercase__ : Optional[int] = intermediate_size
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : Dict = attention_probs_dropout_prob
lowercase__ : Union[str, Any] = max_position_embeddings
lowercase__ : int = type_vocab_size
lowercase__ : Optional[Any] = initializer_range
lowercase__ : Dict = layer_norm_eps
lowercase__ : List[Any] = use_entity_aware_attention
lowercase__ : Optional[Any] = classifier_dropout
| 266 | 1 |
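Editor's note: the config above follows the usual pattern of forwarding special token ids to the base class while keeping model-specific fields. A minimal sketch of that pattern (names are illustrative, not the library's classes):

class BaseConfig:
    def __init__(self, pad_token_id=0, **kwargs):
        self.pad_token_id = pad_token_id

class LukeLikeConfig(BaseConfig):
    def __init__(self, vocab_size=50267, entity_vocab_size=500000, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size

cfg = LukeLikeConfig(pad_token_id=1)
print(cfg.pad_token_id, cfg.vocab_size)  # 1 50267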
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester :
def __init__( self :List[Any] ,_UpperCamelCase :str ,_UpperCamelCase :Tuple=1_4 ,_UpperCamelCase :Union[str, Any]=7 ,_UpperCamelCase :Any=True ,_UpperCamelCase :List[Any]=True ,_UpperCamelCase :Tuple=True ,_UpperCamelCase :Optional[Any]=True ,_UpperCamelCase :List[Any]=True ,_UpperCamelCase :str=9_9 ,_UpperCamelCase :List[Any]=3_2 ,_UpperCamelCase :List[str]=5 ,_UpperCamelCase :Optional[int]=4 ,_UpperCamelCase :Optional[int]=3_7 ,_UpperCamelCase :int="gelu" ,_UpperCamelCase :str=0.1 ,_UpperCamelCase :Optional[int]=0.1 ,_UpperCamelCase :int=5_1_2 ,_UpperCamelCase :Union[str, Any]=1_6 ,_UpperCamelCase :Union[str, Any]=2 ,_UpperCamelCase :List[Any]=0.02 ,_UpperCamelCase :Any=3 ,_UpperCamelCase :Tuple=4 ,_UpperCamelCase :str=None ,):
snake_case_ : Dict = parent
snake_case_ : Optional[int] = batch_size
snake_case_ : Union[str, Any] = seq_length
snake_case_ : Union[str, Any] = is_training
snake_case_ : Tuple = use_token_type_ids
snake_case_ : Union[str, Any] = use_input_mask
snake_case_ : Dict = use_labels
snake_case_ : Optional[int] = use_mc_token_ids
snake_case_ : Dict = vocab_size
snake_case_ : List[Any] = hidden_size
snake_case_ : List[Any] = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : Optional[Any] = intermediate_size
snake_case_ : str = hidden_act
snake_case_ : int = hidden_dropout_prob
snake_case_ : Dict = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = max_position_embeddings
snake_case_ : Tuple = type_vocab_size
snake_case_ : Any = type_sequence_label_size
snake_case_ : Dict = initializer_range
snake_case_ : Tuple = num_labels
snake_case_ : str = num_choices
snake_case_ : int = scope
snake_case_ : str = self.vocab_size - 1
def a__ ( self :Optional[int] ):
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
snake_case_ : Union[str, Any] = None
if self.use_input_mask:
snake_case_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : Tuple = None
if self.use_token_type_ids:
snake_case_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
snake_case_ : Any = None
if self.use_mc_token_ids:
snake_case_ : int = ids_tensor([self.batch_size, self.num_choices] ,self.seq_length )
snake_case_ : Optional[Any] = None
snake_case_ : str = None
snake_case_ : Optional[Any] = None
if self.use_labels:
snake_case_ : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
snake_case_ : Tuple = ids_tensor([self.batch_size] ,self.num_choices )
snake_case_ : Any = self.get_config()
snake_case_ : Optional[Any] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def a__ ( self :Tuple ):
return CTRLConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
def a__ ( self :List[Any] ,_UpperCamelCase :int ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :int ,*_UpperCamelCase :Union[str, Any] ):
snake_case_ : Tuple = CTRLModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
model(_UpperCamelCase ,token_type_ids=_UpperCamelCase ,head_mask=_UpperCamelCase )
model(_UpperCamelCase ,token_type_ids=_UpperCamelCase )
snake_case_ : Optional[Any] = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) ,config.n_layer )
def a__ ( self :Optional[Any] ,_UpperCamelCase :Tuple ,_UpperCamelCase :Dict ,_UpperCamelCase :Tuple ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :List[Any] ,*_UpperCamelCase :int ):
snake_case_ : Union[str, Any] = CTRLLMHeadModel(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ : Tuple = model(_UpperCamelCase ,token_type_ids=_UpperCamelCase ,labels=_UpperCamelCase )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self :Optional[int] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
return config, inputs_dict
def a__ ( self :Optional[int] ,_UpperCamelCase :Tuple ,_UpperCamelCase :List[str] ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :Optional[int] ,*_UpperCamelCase :int ):
snake_case_ : Tuple = self.num_labels
snake_case_ : List[Any] = CTRLForSequenceClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ : Optional[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
snake_case_ : str = model(_UpperCamelCase ,token_type_ids=_UpperCamelCase ,labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
@require_torch
class CTRLModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'feature-extraction': CTRLModel,
'text-classification': CTRLForSequenceClassification,
'text-generation': CTRLLMHeadModel,
'zero-shot': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
def a__ ( self :Dict ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :List[Any] ,_UpperCamelCase :Dict ,_UpperCamelCase :Any ,_UpperCamelCase :Optional[Any] ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def a__ ( self :Any ):
snake_case_ : int = CTRLModelTester(self )
snake_case_ : Dict = ConfigTester(self ,config_class=_UpperCamelCase ,n_embd=3_7 )
def a__ ( self :Dict ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def a__ ( self :Tuple ):
self.config_tester.run_common_tests()
def a__ ( self :Optional[Any] ):
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*_UpperCamelCase )
def a__ ( self :int ):
snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_UpperCamelCase )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def a__ ( self :List[Any] ):
pass
@slow
def a__ ( self :int ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : Any = CTRLModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def a__ ( self :str ):
pass
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
def a__ ( self :Dict ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def a__ ( self :str ):
snake_case_ : Optional[int] = CTRLLMHeadModel.from_pretrained("""ctrl""" )
model.to(_UpperCamelCase )
snake_case_ : Optional[Any] = torch.tensor(
[[1_1_8_5_9, 0, 1_6_1_1, 8]] ,dtype=torch.long ,device=_UpperCamelCase ) # Legal the president is
snake_case_ : List[str] = [
1_1_8_5_9,
0,
1_6_1_1,
8,
5,
1_5_0,
2_6_4_4_9,
2,
1_9,
3_4_8,
4_6_9,
3,
2_5_9_5,
4_8,
2_0_7_4_0,
2_4_6_5_3_3,
2_4_6_5_3_3,
1_9,
3_0,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
snake_case_ : Dict = model.generate(_UpperCamelCase ,do_sample=_UpperCamelCase )
self.assertListEqual(output_ids[0].tolist() ,_UpperCamelCase ) | 267 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
    'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout | 267 | 1 |
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for the given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
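# Worked example (added for illustration): gauss_easter(2000) evaluates to
# datetime(2000, 4, 23) -- April 23 was indeed Easter Sunday in the year 2000.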
if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
print(F'''Easter in {year} {tense} {gauss_easter(year)}''')
| 537 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        truncated = tensor[:sequence_length]
        if padding_side == "right":
            out_tensor[i, : len(truncated)] = truncated
        else:
            out_tensor[i, -len(truncated) :] = truncated

    return out_tensor.tolist()
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None

        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels, as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
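# Usage sketch (illustrative; `tokenizer` and `train_dataset` are placeholders
# for a real LUKE tokenizer and a token-classification dataset):
#
#   collator = DataCollatorForLukeTokenClassification(tokenizer=tokenizer, padding="longest")
#   loader = torch.utils.data.DataLoader(train_dataset, batch_size=8, collate_fn=collator)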
| 609 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="bottleneck", hidden_act="relu", downsample_in_first_stage=False, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
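# Usage sketch (illustrative): a smaller ResNet variant built from basic blocks.
#
#   config = ResNetConfig(hidden_sizes=[64, 128], depths=[2, 2], layer_type="basic")
#   config.stage_names  # -> ['stem', 'stage1', 'stage2']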
| 389 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 389 | 1 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('junnyu/roformer_chinese_small', from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 193 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_chinese_clip": [
"CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ChineseCLIPConfig",
"ChineseCLIPOnnxConfig",
"ChineseCLIPTextConfig",
"ChineseCLIPVisionConfig",
],
"processing_chinese_clip": ["ChineseCLIPProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
"CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ChineseCLIPModel",
"ChineseCLIPPreTrainedModel",
"ChineseCLIPTextModel",
"ChineseCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 193 | 1 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(self, generator: Callable, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, gen_kwargs: Optional[dict] = None, **kwargs):
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='train')
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split='train', verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
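# Usage sketch (illustrative; mirrors how `Dataset.from_generator` drives this class):
#
#   def gen():
#       yield {"text": "hello"}
#       yield {"text": "world"}
#
#   ds = GeneratorDatasetInputStream(generator=gen).read()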
| 58 | '''simple docstring'''
def solution(power: int = 1000) -> int:
    '''Return the sum of the digits of the number 2**power.'''
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
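# Worked example (added for illustration): 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26,
# so solution(15) returns 26.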
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 58 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    '''
    Copy/paste/tweak the model's weights to the transformers design.
    '''
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
UpperCAmelCase__ : List[str] = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 48 | '''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple('CoinsDistribResult', 'moves excess')
def distribute_coin(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The number of nodes should be the same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
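# Usage sketch (illustrative): a root holding 3 coins with two empty children
# needs one move towards each child, so the answer is 2.
#
#   example_root = TreeNode(3, TreeNode(0), TreeNode(0))
#   distribute_coin(example_root)  # -> 2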
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78 | 0 |
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}

if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                F"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                F" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
except KeyboardInterrupt:
pass
print(F"Total reward: {total_reward}")
| 472 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[8, 16, 32, 64], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], num_groups=1):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, out_features=self.out_features, out_indices=self.out_indices, num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason='''Bit does not output attentions''')
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason='''Bit does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='''Bit does not support input and output embeddings''')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1), msg=f'''Parameter {name} of model {model_class} seems not properly initialized''',
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0), msg=f'''Parameter {name} of model {model_class} seems not properly initialized''',
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ['''preactivation''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason='''Bit does not use feedforward chunking''')
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
| 472 | 1 |
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def __A (_SCREAMING_SNAKE_CASE ) ->Optional[int]:
"""simple docstring"""
return getitem, k
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]:
"""simple docstring"""
return setitem, k, v
def __A (_SCREAMING_SNAKE_CASE ) ->int:
"""simple docstring"""
return delitem, k
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE ) ->Optional[int]:
"""simple docstring"""
try:
return fun(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE ), None
except Exception as e:
return None, e
_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]
@pytest.mark.parametrize(
'operations' , (
pytest.param(_add_items , id='add items' ),
pytest.param(_overwrite_items , id='overwrite items' ),
pytest.param(_delete_items , id='delete items' ),
pytest.param(_access_absent_items , id='access absent items' ),
pytest.param(_add_with_resize_up , id='add with resize up' ),
pytest.param(_add_with_resize_down , id='add with resize down' ),
) , )
def __A (_SCREAMING_SNAKE_CASE ) ->List[Any]:
"""simple docstring"""
lowerCAmelCase__ :Tuple = HashMap(initial_block_size=4 )
lowerCAmelCase__ :str = {}
for _, (fun, *args) in enumerate(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = _run_operation(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ , lowerCAmelCase__ :List[str] = _run_operation(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )
assert my_res == py_res
assert str(_SCREAMING_SNAKE_CASE ) == str(_SCREAMING_SNAKE_CASE )
assert set(_SCREAMING_SNAKE_CASE ) == set(_SCREAMING_SNAKE_CASE )
assert len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE )
assert set(my.items() ) == set(py.items() )
def __A () ->str:
"""simple docstring"""
def is_public(_SCREAMING_SNAKE_CASE ) -> bool:
return not name.startswith('_' )
lowerCAmelCase__ :Tuple = {name for name in dir({} ) if is_public(_SCREAMING_SNAKE_CASE )}
lowerCAmelCase__ :Optional[Any] = {name for name in dir(HashMap() ) if is_public(_SCREAMING_SNAKE_CASE )}
assert dict_public_names > hash_public_names
| 93 |
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/xprophetnet-large-wiki100-cased""": (
        """https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"""
    ),
}
class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(self, activation_dropout=0.1, activation_function="gelu", vocab_size=30522, hidden_size=1024, encoder_ffn_dim=4096, num_encoder_layers=12, num_encoder_attention_heads=16, decoder_ffn_dim=4096, num_decoder_layers=12, num_decoder_attention_heads=16, attention_dropout=0.1, dropout=0.1, max_position_embeddings=512, init_std=0.02, is_encoder_decoder=True, add_cross_attention=True, decoder_start_token_id=0, ngram=2, num_buckets=32, relative_max_distance=128, disable_ngram_loss=False, eps=0.0, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, add_cross_attention=add_cross_attention, decoder_start_token_id=decoder_start_token_id, **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
            ' `num_decoder_layers`.'
        )
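# Usage sketch (illustrative):
#
#   config = XLMProphetNetConfig(num_encoder_layers=2, num_decoder_layers=2)
#   config.num_hidden_layers  # -> 4 (read-only; see the property above)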
| 437 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, spectrogram_length=2048, feature_size=128, num_audio_channels=1, hop_length=512, chunk_length=30, sampling_rate=44100):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class _snake_case ( __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Optional[int] = TvltFeatureExtractor
def A__ ( self: List[str] ) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = TvltFeatureExtractionTester(self )
def A__ ( self: Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(lowerCamelCase_ ,"""spectrogram_length""" ) )
self.assertTrue(hasattr(lowerCamelCase_ ,"""feature_size""" ) )
self.assertTrue(hasattr(lowerCamelCase_ ,"""num_audio_channels""" ) )
self.assertTrue(hasattr(lowerCamelCase_ ,"""hop_length""" ) )
self.assertTrue(hasattr(lowerCamelCase_ ,"""chunk_length""" ) )
self.assertTrue(hasattr(lowerCamelCase_ ,"""sampling_rate""" ) )
def A__ ( self: Tuple ) -> Dict:
UpperCAmelCase_ : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ : str = feat_extract_first.save_pretrained(lowerCamelCase_ )[0]
check_json_file_has_correct_format(lowerCamelCase_ )
UpperCAmelCase_ : Any = self.feature_extraction_class.from_pretrained(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = feat_extract_first.to_dict()
UpperCAmelCase_ : Tuple = feat_extract_second.to_dict()
UpperCAmelCase_ : Tuple = dict_first.pop("""mel_filters""" )
UpperCAmelCase_ : List[str] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(lowerCamelCase_ ,lowerCamelCase_ ) )
self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: int ) -> int:
UpperCAmelCase_ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ : List[Any] = os.path.join(lowerCamelCase_ ,"""feat_extract.json""" )
feat_extract_first.to_json_file(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = self.feature_extraction_class.from_json_file(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = feat_extract_first.to_dict()
UpperCAmelCase_ : Union[str, Any] = feat_extract_second.to_dict()
UpperCAmelCase_ : str = dict_first.pop("""mel_filters""" )
UpperCAmelCase_ : List[str] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(lowerCamelCase_ ,lowerCamelCase_ ) )
self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: Any ) -> int:
# Initialize feature_extractor
UpperCAmelCase_ : Any = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase_ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
UpperCAmelCase_ : Optional[int] = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs]
# Test not batched input
UpperCAmelCase_ : List[str] = feature_extractor(np_speech_inputs[0] ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
UpperCAmelCase_ : List[Any] = feature_extractor(lowerCamelCase_ ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
UpperCAmelCase_ : Union[str, Any] = feature_extractor(
lowerCamelCase_ ,return_tensors="""np""" ,sampling_rate=44100 ,mask_audio=lowerCamelCase_ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
UpperCAmelCase_ : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCAmelCase_ : List[str] = np.asarray(lowerCamelCase_ )
UpperCAmelCase_ : Any = feature_extractor(lowerCamelCase_ ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def A__ ( self: List[Any] ,lowerCamelCase_: str ) -> List[str]:
UpperCAmelCase_ : Optional[int] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" ,"""clean""" ,split="""validation""" )
# automatic decoding with librispeech
UpperCAmelCase_ : Tuple = ds.sort("""id""" ).select(range(lowerCamelCase_ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def A__ ( self: Dict ) -> List[str]:
UpperCAmelCase_ : Dict = self._load_datasamples(1 )
UpperCAmelCase_ : Dict = TvltFeatureExtractor()
UpperCAmelCase_ : str = feature_extractor(lowerCamelCase_ ,return_tensors="""pt""" ).audio_values
        self.assertEqual(audio_values.shape ,(1, 1, 192, 128) )
UpperCAmelCase_ : Dict = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] ,lowerCamelCase_ ,atol=1e-4 ) )
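# Illustrative note (hedged, not part of the original test): the assertions above
# pin down the layout TvltFeatureExtractor is expected to return for
# `audio_values`, i.e. (batch, num_channels, time_frames, feature_size), with
# time_frames capped at `spectrogram_length`. A minimal sketch:
#
#   out = TvltFeatureExtractor()(speech, return_tensors="np", sampling_rate=44100)
#   out.audio_values.shape  # -> (batch, num_channels, <= spectrogram_length, feature_size)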
| 701 |
from __future__ import annotations
from typing import Any
class _snake_case :
'''simple docstring'''
def __init__( self: Optional[int] ,lowerCamelCase_: int = 6 ) -> None:
UpperCAmelCase_ : Node | None = None
UpperCAmelCase_ : Node | None = None
self.create_linked_list(lowerCamelCase_ )
def A__ ( self: Optional[int] ,lowerCamelCase_: int ) -> None:
UpperCAmelCase_ : List[Any] = Node()
UpperCAmelCase_ : List[str] = current_node
UpperCAmelCase_ : List[Any] = current_node
UpperCAmelCase_ : Any = current_node
for _ in range(1 ,lowerCamelCase_ ):
UpperCAmelCase_ : Optional[int] = Node()
UpperCAmelCase_ : Optional[Any] = current_node
UpperCAmelCase_ : List[str] = previous_node
UpperCAmelCase_ : str = current_node
UpperCAmelCase_ : Dict = self.front
UpperCAmelCase_ : List[Any] = previous_node
def A__ ( self: Any ) -> bool:
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def A__ ( self: List[str] ) -> Any | None:
self.check_can_perform_operation()
return self.front.data if self.front else None
def A__ ( self: Tuple ,lowerCamelCase_: Any ) -> None:
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
UpperCAmelCase_ : str = self.rear.next
if self.rear:
UpperCAmelCase_ : int = data
def A__ ( self: Optional[int] ) -> Any:
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
UpperCAmelCase_ : Union[str, Any] = self.front.data
UpperCAmelCase_ : Dict = None
return data
UpperCAmelCase_ : Union[str, Any] = self.front
UpperCAmelCase_ : Optional[int] = old_front.next
UpperCAmelCase_ : Union[str, Any] = old_front.data
UpperCAmelCase_ : Optional[Any] = None
return data
def A__ ( self: str ) -> None:
if self.is_empty():
raise Exception("""Empty Queue""" )
def A__ ( self: int ) -> None:
if self.rear and self.rear.next == self.front:
raise Exception("""Full Queue""" )
class _snake_case :
'''simple docstring'''
def __init__( self: Tuple ) -> None:
UpperCAmelCase_ : Any | None = None
UpperCAmelCase_ : Node | None = None
UpperCAmelCase_ : Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
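# Usage sketch (hedged): the method names in this row are mangled to `A__`, so the
# calls below assume the de-obfuscated names of the original circular-queue
# implementation (`enqueue`, `dequeue`) and a hypothetical class name.
#
#   queue = CircularQueueLinkedList(2)   # hypothetical name for the class above
#   queue.enqueue("a"); queue.enqueue("b")
#   queue.dequeue()   # -> "a"
#   queue.dequeue()   # -> "b"
#   queue.dequeue()   # raises Exception("Empty Queue")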
| 322 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ : Tuple = logging.get_logger(__name__)
__magic_name__ : Optional[int] = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class UpperCamelCase__ ( lowerCamelCase__ ):
"""simple docstring"""
UpperCAmelCase__ = 'bridgetower_vision_model'
def __init__( self : Optional[int] , __A : int=7_6_8 , __A : Union[str, Any]=1_2 , __A : Union[str, Any]=3 , __A : Any=1_6 , __A : List[Any]=2_8_8 , __A : Any=1 , __A : Optional[int]=1e-05 , __A : Dict=False , __A : Optional[int]=True , __A : int=False , **__A : str , ):
"""simple docstring"""
super().__init__(**__A )
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_channels
_lowercase = patch_size
_lowercase = image_size
_lowercase = initializer_factor
_lowercase = layer_norm_eps
_lowercase = stop_gradient
_lowercase = share_layernorm
_lowercase = remove_last_layer
@classmethod
def snake_case ( cls : List[Any] , __A : Union[str, os.PathLike] , **__A : Optional[int] ):
"""simple docstring"""
_lowercase , _lowercase = cls.get_config_dict(__A , **__A )
if config_dict.get("model_type" ) == "bridgetower":
            _lowercase = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__A , **__A )
class UpperCamelCase__ ( lowerCamelCase__ ):
"""simple docstring"""
UpperCAmelCase__ = 'bridgetower_text_model'
def __init__( self : Any , __A : List[Any]=5_0_2_6_5 , __A : Tuple=7_6_8 , __A : List[Any]=1_2 , __A : Optional[int]=1_2 , __A : Union[str, Any]=1 , __A : Tuple=3_0_7_2 , __A : List[str]="gelu" , __A : int=0.1 , __A : List[str]=0.1 , __A : Dict=5_1_4 , __A : Union[str, Any]=1 , __A : Any=1e-05 , __A : List[str]=1 , __A : List[Any]=0 , __A : Any=2 , __A : Optional[Any]="absolute" , __A : Dict=True , **__A : Optional[Any] , ):
"""simple docstring"""
super().__init__(**__A )
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = hidden_act
_lowercase = initializer_factor
_lowercase = intermediate_size
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = max_position_embeddings
_lowercase = type_vocab_size
_lowercase = layer_norm_eps
_lowercase = position_embedding_type
_lowercase = use_cache
_lowercase = pad_token_id
_lowercase = bos_token_id
_lowercase = eos_token_id
@classmethod
def snake_case ( cls : List[str] , __A : Union[str, os.PathLike] , **__A : Optional[int] ):
"""simple docstring"""
_lowercase , _lowercase = cls.get_config_dict(__A , **__A )
if config_dict.get("model_type" ) == "bridgetower":
_lowercase = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__A , **__A )
class UpperCamelCase__ ( lowerCamelCase__ ):
"""simple docstring"""
UpperCAmelCase__ = 'bridgetower'
def __init__( self : List[str] , __A : List[str]=True , __A : str="gelu" , __A : List[str]=7_6_8 , __A : List[Any]=1 , __A : str=1e-05 , __A : Any=False , __A : Dict="add" , __A : Optional[Any]=1_2 , __A : Any=6 , __A : Tuple=False , __A : Optional[int]=False , __A : List[Any]=None , __A : List[str]=None , **__A : Optional[Any] , ):
"""simple docstring"""
# TODO: remove this once the Hub files are updated.
_lowercase = kwargs.pop("text_config_dict" , __A )
_lowercase = kwargs.pop("vision_config_dict" , __A )
super().__init__(**__A )
_lowercase = share_cross_modal_transformer_layers
_lowercase = hidden_act
_lowercase = hidden_size
_lowercase = initializer_factor
_lowercase = layer_norm_eps
_lowercase = share_link_tower_layers
_lowercase = link_tower_type
_lowercase = num_attention_heads
_lowercase = num_hidden_layers
_lowercase = tie_word_embeddings
_lowercase = init_layernorm_from_vision_encoder
if text_config is None:
_lowercase = {}
logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values." )
if vision_config is None:
_lowercase = {}
logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values." )
_lowercase = BridgeTowerTextConfig(**__A )
_lowercase = BridgeTowerVisionConfig(**__A )
@classmethod
def snake_case ( cls : Optional[Any] , __A : BridgeTowerTextConfig , __A : BridgeTowerVisionConfig , **__A : str ):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__A )
def snake_case ( self : str ):
"""simple docstring"""
_lowercase = copy.deepcopy(self.__dict__ )
_lowercase = self.text_config.to_dict()
_lowercase = self.vision_config.to_dict()
_lowercase = self.__class__.model_type
return output
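# Usage sketch (hedged): the combined-config classmethod above (mangled to
# `snake_case`) mirrors what transformers conventionally names
# `from_text_vision_configs`; `BridgeTowerConfig` below is an assumed name for
# the mangled top-level class.
#
#   text_cfg = BridgeTowerTextConfig()        # hidden_size defaults to 768
#   vision_cfg = BridgeTowerVisionConfig()
#   cfg = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)
#   cfg.to_dict()["text_config"]["hidden_size"]  # -> 768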
| 497 |
'''simple docstring'''
from typing import Any
def A__ ( A_ ) -> list[Any]:
if not input_list:
return []
_lowercase = [input_list.count(A_ ) for value in input_list]
_lowercase = max(A_ ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(A_ ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
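# Examples (hedged; assuming the mangled function name was originally `mode`):
#
#   mode([2, 2, 3, 3, 4])  # -> [2, 3]  (both values reach the max count of 2)
#   mode([1])              # -> [1]
#   mode([])               # -> []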
| 497 | 1 |
"""simple docstring"""
from functools import lru_cache
def __magic_name__ ( __snake_case : int ) -> set:
lowercase : List[str] = 2
lowercase : Union[str, Any] = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(UpperCAmelCase__ )
if n > 1:
factors.add(UpperCAmelCase__ )
return factors
@lru_cache
def __magic_name__ ( __snake_case : int ) -> int:
return len(unique_prime_factors(UpperCAmelCase__ ) )
def __magic_name__ ( __snake_case : list ) -> bool:
return len(set(UpperCAmelCase__ ) ) in (0, 1)
def __magic_name__ ( __snake_case : int ) -> list:
lowercase : Tuple = 2
while True:
# Increment each value of a generated range
lowercase : List[Any] = [base + i for i in range(UpperCAmelCase__ )]
# Run elements through out unique_prime_factors function
# Append our target number to the end.
lowercase : str = [upf_len(UpperCAmelCase__ ) for x in group]
checker.append(UpperCAmelCase__ )
# If all numbers in the list are equal, return the group variable.
if equality(UpperCAmelCase__ ):
return group
# Increment our base variable by 1
base += 1
def __magic_name__ ( __snake_case : int = 4 ) -> int:
lowercase : Union[str, Any] = run(UpperCAmelCase__ )
return results[0] if len(UpperCAmelCase__ ) else None
if __name__ == "__main__":
print(solution())
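# Worked values (well-known Project Euler 47 results; `solution` is the call
# name used above):
#
#   solution(2)  # -> 14      (14 = 2*7 and 15 = 3*5 each have 2 distinct factors)
#   solution(3)  # -> 644     (644, 645, 646)
#   solution(4)  # -> 134043  (the default, printed by the __main__ block)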
| 715 |
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
_A : Any = """."""
if __name__ == "__main__":
_A : Any = os.path.join(REPO_PATH, """utils/documentation_tests.txt""")
_A : Any = []
_A : List[Any] = []
with open(doctest_file_path) as fp:
for line in fp:
_A : int = line.strip()
_A : Optional[Any] = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
_A : Union[str, Any] = """\n""".join(non_existent_paths)
raise ValueError(F"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
if all_paths != sorted(all_paths):
raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""")
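# Illustrative file format (hypothetical entries): `utils/documentation_tests.txt`
# is expected to hold one repo-relative path per line, sorted alphabetically, e.g.
#
#   docs/source/en/quicktour.md
#   src/transformers/models/bert/modeling_bert.py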
| 518 | 0 |
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def lowerCAmelCase_( lowercase_ : List[str] ) -> str:
_lowerCamelCase = list(s_dict.keys() )
for key in keys:
_lowerCamelCase = R'''.*/layers_(\d+)'''
_lowerCamelCase = key
if re.match(__a , __a ):
_lowerCamelCase = re.sub(r'''layers_(\d+)''' , r'''block/\1/layer''' , __a )
_lowerCamelCase = R'''(encoder|decoder)\/'''
if re.match(__a , __a ):
_lowerCamelCase = re.match(__a , __a ).groups()
if groups[0] == "encoder":
_lowerCamelCase = re.sub(r'''/mlp/''' , r'''/1/mlp/''' , __a )
_lowerCamelCase = re.sub(r'''/pre_mlp_layer_norm/''' , r'''/1/layer_norm/''' , __a )
elif groups[0] == "decoder":
_lowerCamelCase = re.sub(r'''/mlp/''' , r'''/2/mlp/''' , __a )
_lowerCamelCase = re.sub(r'''/pre_mlp_layer_norm/''' , r'''/2/layer_norm/''' , __a )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
_lowerCamelCase = new_key.replace(__a , __a )
print(F"""{key} -> {new_key}""" )
_lowerCamelCase = s_dict.pop(__a )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_lowerCamelCase = s_dict[
'''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_lowerCamelCase = s_dict[
'''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
_lowerCamelCase = s_dict[key].shape[0]
_lowerCamelCase = s_dict[key]
for idx in range(__a ):
_lowerCamelCase = expert_weihts[idx]
print(F"""{key} -> {key.replace("expert/" , "nested fstring" )}""" )
s_dict.pop(__a )
return s_dict
__SCREAMING_SNAKE_CASE : Dict = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def lowerCAmelCase_( lowercase_ : int , lowercase_ : Any ) -> Union[str, Any]:
import regex as re
with open(__a , '''r''' ) as f:
_lowerCamelCase = f.read()
_lowerCamelCase = re.findall(r'''(.*) = ([0-9.]*)''' , __a )
_lowerCamelCase = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
_lowerCamelCase = float(__a ) if '''.''' in value else int(__a )
_lowerCamelCase = re.findall(r'''(.*activations) = \(\'(.*)\',\)''' , __a )[0]
_lowerCamelCase = str(activation[1] )
_lowerCamelCase = num_experts
_lowerCamelCase = SwitchTransformersConfig(**__a )
return config
def lowerCAmelCase_( lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : Dict=None , lowercase_ : int="./" , lowercase_ : Union[str, Any]=8 ) -> Dict:
print(F"""Loading flax weights from : {flax_checkpoint_path}""" )
_lowerCamelCase = checkpoints.load_tax_checkpoint(__a )
if gin_file is not None:
_lowerCamelCase = convert_gin_to_config(__a , __a )
else:
_lowerCamelCase = SwitchTransformersConfig.from_pretrained(__a )
_lowerCamelCase = SwitchTransformersForConditionalGeneration(__a )
_lowerCamelCase = flax_params['''target''']
_lowerCamelCase = flatten_dict(__a , sep='''/''' )
_lowerCamelCase = rename_keys(__a )
_lowerCamelCase = unflatten_dict(__a , sep='''/''' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(__a , __a )
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
pt_model.save_pretrained(__a )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
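# Example invocation (hypothetical script name and paths; the flags are exactly
# the ones defined in the argparse above):
#
#   python convert_switch_checkpoint.py \
#       --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --gin_file /path/to/model.gin \
#       --pytorch_dump_folder_path ./switch-base-8 \
#       --num_experts 8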
| 661 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = LxmertTokenizer
UpperCAmelCase__ : Optional[Any] = LxmertTokenizerFast
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : Dict = True
def __lowercase ( self ) -> Union[str, Any]:
super().setUp()
_a : int = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_a : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowercase ( self , _a ) -> List[str]:
_a : Tuple = '''UNwant\u00E9d,running'''
_a : str = '''unwanted, running'''
return input_text, output_text
def __lowercase ( self ) -> List[Any]:
_a : str = self.tokenizer_class(self.vocab_file )
_a : str = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_a , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [7, 4, 5, 1_0, 8, 9] )
def __lowercase ( self ) -> List[Any]:
if not self.test_rust_tokenizer:
return
_a : Optional[Any] = self.get_tokenizer()
_a : str = self.get_rust_tokenizer()
_a : Optional[Any] = '''I was born in 92000, and this is falsé.'''
_a : Optional[Any] = tokenizer.tokenize(_a )
_a : List[Any] = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
_a : List[Any] = tokenizer.encode(_a , add_special_tokens=_a )
_a : Any = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
_a : Dict = self.get_rust_tokenizer()
_a : Optional[int] = tokenizer.encode(_a )
_a : Dict = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
| 14 | 0 |
import math
def lowerCamelCase_ ( UpperCamelCase__ : list, UpperCamelCase__ : int = 0, UpperCamelCase__ : int = 0 ):
'''simple docstring'''
UpperCamelCase__ = end or len(UpperCamelCase__ )
for i in range(UpperCamelCase__, UpperCamelCase__ ):
UpperCamelCase__ = i
UpperCamelCase__ = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
UpperCamelCase__ = array[temp_index - 1]
temp_index -= 1
UpperCamelCase__ = temp_index_value
return array
def lowerCamelCase_ ( UpperCamelCase__ : list, UpperCamelCase__ : int, UpperCamelCase__ : int ): # Max Heap
'''simple docstring'''
UpperCamelCase__ = index
UpperCamelCase__ = 2 * index + 1 # Left Node
UpperCamelCase__ = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
UpperCamelCase__ = left_index
if right_index < heap_size and array[largest] < array[right_index]:
UpperCamelCase__ = right_index
if largest != index:
UpperCamelCase__ , UpperCamelCase__ = array[largest], array[index]
heapify(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
def lowerCamelCase_ ( UpperCamelCase__ : list ):
'''simple docstring'''
UpperCamelCase__ = len(UpperCamelCase__ )
for i in range(n // 2, -1, -1 ):
heapify(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
for i in range(n - 1, 0, -1 ):
UpperCamelCase__ , UpperCamelCase__ = array[0], array[i]
heapify(UpperCamelCase__, 0, UpperCamelCase__ )
return array
def lowerCamelCase_ ( UpperCamelCase__ : list, UpperCamelCase__ : int, UpperCamelCase__ : int, UpperCamelCase__ : int ):
'''simple docstring'''
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def lowerCamelCase_ ( UpperCamelCase__ : list, UpperCamelCase__ : int, UpperCamelCase__ : int, UpperCamelCase__ : int ):
'''simple docstring'''
UpperCamelCase__ = low
UpperCamelCase__ = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
UpperCamelCase__ , UpperCamelCase__ = array[j], array[i]
i += 1
def lowerCamelCase_ ( UpperCamelCase__ : list ):
'''simple docstring'''
if len(UpperCamelCase__ ) == 0:
return array
UpperCamelCase__ = 2 * math.ceil(math.loga(len(UpperCamelCase__ ) ) )
UpperCamelCase__ = 16
return intro_sort(UpperCamelCase__, 0, len(UpperCamelCase__ ), UpperCamelCase__, UpperCamelCase__ )
def lowerCamelCase_ ( UpperCamelCase__ : list, UpperCamelCase__ : int, UpperCamelCase__ : int, UpperCamelCase__ : int, UpperCamelCase__ : int ):
'''simple docstring'''
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(UpperCamelCase__ )
max_depth -= 1
UpperCamelCase__ = median_of_a(UpperCamelCase__, UpperCamelCase__, start + ((end - start) // 2) + 1, end - 1 )
UpperCamelCase__ = partition(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
intro_sort(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
UpperCamelCase__ = p
return insertion_sort(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase = input("""Enter numbers separated by a comma : """).strip()
lowercase = [float(item) for item in user_input.split(""",""")]
print(sort(unsorted))
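# Worked note (hedged): `sort` delegates to intro_sort with a recursion budget of
# max_depth = 2 * ceil(log2(n)), falls back to insertion_sort once a slice is
# smaller than the size_threshold of 16, and to heap_sort when the budget runs out.
#
#   sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
#   # -> [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]
#   # here n = 16, so max_depth = 2 * ceil(log2(16)) = 8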
| 591 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowercase = """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def lowerCamelCase_ ( UpperCamelCase__ : Optional[int]=None ):
'''simple docstring'''
if subparsers is not None:
UpperCamelCase__ = subparsers.add_parser('''tpu-config''', description=_description )
else:
UpperCamelCase__ = argparse.ArgumentParser('''Accelerate tpu-config command''', description=_description )
# Core arguments
UpperCamelCase__ = parser.add_argument_group(
'''Config Arguments''', '''Arguments that can be configured through `accelerate config`.''' )
config_args.add_argument(
'''--config_file''', type=UpperCamelCase__, default=UpperCamelCase__, help='''Path to the config file to use for accelerate.''', )
config_args.add_argument(
'''--tpu_name''', default=UpperCamelCase__, help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''', )
config_args.add_argument(
'''--tpu_zone''', default=UpperCamelCase__, help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''', )
UpperCamelCase__ = parser.add_argument_group('''TPU Arguments''', '''Arguments for options ran inside the TPU.''' )
pod_args.add_argument(
'''--use_alpha''', action='''store_true''', help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''', )
pod_args.add_argument(
'''--command_file''', default=UpperCamelCase__, help='''The path to the file containing the commands to run on the pod on startup.''', )
pod_args.add_argument(
'''--command''', action='''append''', nargs='''+''', help='''A command to run on the pod. Can be passed multiple times.''', )
pod_args.add_argument(
'''--install_accelerate''', action='''store_true''', help='''Whether to install accelerate on the pod. Defaults to False.''', )
pod_args.add_argument(
'''--accelerate_version''', default='''latest''', help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''', )
pod_args.add_argument(
'''--debug''', action='''store_true''', help='''If set, will print the command that would be run instead of running it.''' )
if subparsers is not None:
parser.set_defaults(func=UpperCamelCase__ )
return parser
def lowerCamelCase_ ( UpperCamelCase__ : str ):
'''simple docstring'''
UpperCamelCase__ = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(UpperCamelCase__ ):
UpperCamelCase__ = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCamelCase__ = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCamelCase__ = defaults.commands
if not args.tpu_name:
UpperCamelCase__ = defaults.tpu_name
if not args.tpu_zone:
UpperCamelCase__ = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCamelCase__ = '''git+https://github.com/huggingface/accelerate.git'''
elif args.accelerate_version == "latest":
UpperCamelCase__ = '''accelerate -U'''
elif isinstance(parse(args.accelerate_version ), UpperCamelCase__ ):
UpperCamelCase__ = F"""accelerate=={args.accelerate_version}"""
if not args.command_file and not args.command:
raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
if args.command_file:
with open(args.command_file, '''r''' ) as f:
UpperCamelCase__ = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0], UpperCamelCase__ ):
UpperCamelCase__ = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCamelCase__ = ['''cd /usr/share''']
if args.install_accelerate:
new_cmd += [F"""pip install {args.accelerate_version}"""]
new_cmd += args.command
UpperCamelCase__ = '''; '''.join(UpperCamelCase__ )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCamelCase__ = ['''gcloud''']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F"""Running {" ".join(UpperCamelCase__ )}""" )
return
subprocess.run(UpperCamelCase__ )
print('''Successfully setup pod.''' )
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCamelCase__ = tpu_command_parser()
UpperCamelCase__ = parser.parse_args()
tpu_command_launcher(UpperCamelCase__ )
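# Example invocation (hypothetical TPU name/zone and commands; flags as defined
# in the parser above):
#
#   accelerate tpu-config \
#       --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "pip install -r requirements.txt" \
#       --command "accelerate launch train.py" \
#       --install_accelerate --debug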
| 591 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : Tuple , __snake_case : int ) -> bool:
__A : Any = len(A__ )
__A : Dict = len(A__ )
__A : Dict = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
__A : List[str] = True
for i in range(A__ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
__A : int = True
if a[i].islower():
__A : Optional[Any] = True
return dp[n][m]
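# Examples (hedged; assuming the mangled function name was originally `abbr`).
# The DP decides whether `a` can become `b` by upper-casing some of its lowercase
# letters and deleting the remaining lowercase ones:
#
#   abbr("daBcd", "ABC")  # -> True  (uppercase 'a' and 'c', delete both 'd's)
#   abbr("dBcd", "ABC")   # -> False (no way to produce the leading 'A')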
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 8 |
'''simple docstring'''
def __a ( A__ = 1000 ) -> int:
lowerCAmelCase = 3
lowerCAmelCase = 0
while a < n:
if a % 3 == 0 or a % 5 == 0:
result += a
elif a % 15 == 0:
result -= a
a += 1
return result
if __name__ == "__main__":
print(f"{solution() = }")
| 649 | 0 |
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase ) -> bool:
'''simple docstring'''
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCamelCase : List[str] = F"""Input value of [number={number}] must be an integer"""
raise TypeError(_lowerCamelCase )
if number < 0:
return False
_lowerCamelCase : Dict = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
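# Examples (hedged; assuming the mangled name was originally
# `is_automorphic_number` - a number whose square ends in the number itself):
#
#   5   -> 25     True
#   76  -> 5776   True
#   7   -> 49     False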
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 386 |
"""simple docstring"""
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
_lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(_lowerCamelCase )
_lowerCamelCase : List[Any] = FlaxAutoModelForSeqaSeqLM.from_config(config=_lowerCamelCase )
_lowerCamelCase : str = checkpoints.load_tax_checkpoint(_lowerCamelCase )
_lowerCamelCase : str = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
_lowerCamelCase : Optional[int] = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_lowerCamelCase : Optional[Any] = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCamelCase : Optional[int] = "TransientGlobalSelfAttention"
else:
raise ValueError(
"Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
" attribute with a value from ['local', 'transient-global]." )
# Encoder
for layer_index in range(config.num_layers ):
_lowerCamelCase : Tuple = F"""layers_{str(_lowerCamelCase )}"""
# Self-Attention
_lowerCamelCase : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
_lowerCamelCase : List[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
_lowerCamelCase : Tuple = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
_lowerCamelCase : int = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCamelCase : Optional[int] = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
_lowerCamelCase : Any = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
_lowerCamelCase : Any = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
_lowerCamelCase : str = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
_lowerCamelCase : List[Any] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
_lowerCamelCase : Optional[Any] = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
_lowerCamelCase : List[str] = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
_lowerCamelCase : Tuple = flax_model.params["encoder"]["block"][str(_lowerCamelCase )]["layer"]
_lowerCamelCase : int = tax_attention_key
_lowerCamelCase : Union[str, Any] = tax_attention_out
_lowerCamelCase : str = tax_attention_query
_lowerCamelCase : Dict = tax_attention_value
_lowerCamelCase : str = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCamelCase : Union[str, Any] = tax_global_layer_norm
if split_mlp_wi:
_lowerCamelCase : Optional[Any] = tax_mlp_wi_a
_lowerCamelCase : int = tax_mlp_wi_a
else:
_lowerCamelCase : str = tax_mlp_wi
_lowerCamelCase : Optional[int] = tax_mlp_wo
_lowerCamelCase : List[str] = tax_mlp_layer_norm
_lowerCamelCase : Tuple = flax_model_encoder_layer_block
# Only for layer 0:
_lowerCamelCase : Optional[int] = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
_lowerCamelCase : int = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCamelCase : int = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
_lowerCamelCase : List[str] = tax_encoder_global_rel_embedding
# Assigning
_lowerCamelCase : List[str] = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
_lowerCamelCase : int = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
_lowerCamelCase : str = F"""layers_{str(_lowerCamelCase )}"""
# Self-Attention
_lowerCamelCase : Optional[int] = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
_lowerCamelCase : Tuple = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
_lowerCamelCase : Dict = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
_lowerCamelCase : Any = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
_lowerCamelCase : Tuple = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
_lowerCamelCase : Optional[int] = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
_lowerCamelCase : List[str] = tax_enc_dec_attention_module["key"]["kernel"]
_lowerCamelCase : Tuple = tax_enc_dec_attention_module["out"]["kernel"]
_lowerCamelCase : Union[str, Any] = tax_enc_dec_attention_module["query"]["kernel"]
_lowerCamelCase : Any = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
_lowerCamelCase : int = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
_lowerCamelCase : Optional[int] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
_lowerCamelCase : List[str] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
_lowerCamelCase : str = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
_lowerCamelCase : List[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
_lowerCamelCase : Tuple = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
_lowerCamelCase : str = flax_model.params["decoder"]["block"][str(_lowerCamelCase )]["layer"]
_lowerCamelCase : Tuple = tax_attention_key
_lowerCamelCase : List[str] = tax_attention_out
_lowerCamelCase : Union[str, Any] = tax_attention_query
_lowerCamelCase : Optional[int] = tax_attention_value
_lowerCamelCase : Optional[Any] = tax_pre_attention_layer_norm
_lowerCamelCase : Tuple = tax_enc_dec_attention_key
_lowerCamelCase : List[str] = tax_enc_dec_attention_out
_lowerCamelCase : Tuple = tax_enc_dec_attention_query
_lowerCamelCase : Tuple = tax_enc_dec_attention_value
_lowerCamelCase : Optional[Any] = tax_cross_layer_norm
if split_mlp_wi:
_lowerCamelCase : List[Any] = tax_mlp_wi_a
_lowerCamelCase : List[Any] = tax_mlp_wi_a
else:
_lowerCamelCase : Dict = tax_mlp_wi
_lowerCamelCase : Union[str, Any] = tax_mlp_wo
        _lowerCamelCase : Dict = tax_mlp_layer_norm
_lowerCamelCase : Optional[int] = flax_model_decoder_layer_block
# Decoder Normalization
_lowerCamelCase : Tuple = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
_lowerCamelCase : Union[str, Any] = txa_decoder_norm
# Only for layer 0:
_lowerCamelCase : int = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
_lowerCamelCase : List[Any] = tax_decoder_rel_embedding
# Token Embeddings
_lowerCamelCase : Union[str, Any] = tax_model["target"]["token_embedder"]["embedding"]
_lowerCamelCase : Any = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_lowerCamelCase : Tuple = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
flax_model.save_pretrained(_lowerCamelCase )
print("T5X Model was sucessfully converted!" )
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
_lowerCAmelCase : int = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 386 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
a_ : List[Any] = None
a_ : Union[str, Any] = logging.get_logger(__name__)
a_ : Dict = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
a_ : str = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
'tokenizer_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
},
}
a_ : str = {
'google/rembert': 2_56,
}
a_ : List[str] = '▁'
class __lowercase( __snake_case ):
'''simple docstring'''
__a : List[str] = VOCAB_FILES_NAMES
__a : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__a : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : int = RemBertTokenizer
def __init__( self , __a=None , __a=None , __a=True , __a=True , __a=False , __a="[CLS]" , __a="[SEP]" , __a="<unk>" , __a="[SEP]" , __a="<pad>" , __a="[CLS]" , __a="[MASK]" , **__a , ):
__lowerCamelCase : Any = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
super().__init__(
__a , tokenizer_file=__a , do_lower_case=__a , remove_space=__a , keep_accents=__a , bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , **__a , )
__lowerCamelCase : Dict = do_lower_case
__lowerCamelCase : Optional[Any] = remove_space
__lowerCamelCase : Tuple = keep_accents
__lowerCamelCase : Union[str, Any] = vocab_file
__lowerCamelCase : Optional[int] = False if not self.vocab_file else True
def snake_case_ ( self , __a , __a = None ):
__lowerCamelCase : int = [self.sep_token_id]
__lowerCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def snake_case_ ( self , __a , __a = None , __a = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1]
return [1] + ([0] * len(__a )) + [1]
def snake_case_ ( self , __a , __a = None ):
__lowerCamelCase : Union[str, Any] = [self.sep_token_id]
__lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case_ ( self , __a , __a = None ):
if not os.path.isdir(__a ):
logger.error('Vocabulary path ({}) should be a directory'.format(__a ) )
return
__lowerCamelCase : str = os.path.join(
__a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ):
copyfile(self.vocab_file , __a )
return (out_vocab_file,)
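# Layout implemented by the three methods above (grounded in their bodies):
#
#   single sequence: [CLS] A [SEP]          token_type_ids: 0 0 ... 0
#   sequence pair:   [CLS] A [SEP] B [SEP]  token_type_ids: 0 ... 0 1 ... 1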
| 594 |
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class A :
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=[1, 2, 1] , SCREAMING_SNAKE_CASE=[2, 2, 4] , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=2.0 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=8 , SCREAMING_SNAKE_CASE=["stage1", "stage2", "stage3"] , SCREAMING_SNAKE_CASE=[1, 2, 3] , ) -> Optional[int]:
"""simple docstring"""
A : Any = parent
A : List[Any] = batch_size
A : Any = image_size
A : Optional[Any] = patch_size
A : Dict = num_channels
A : Union[str, Any] = embed_dim
A : Optional[int] = depths
A : int = num_heads
A : Tuple = window_size
A : str = mlp_ratio
A : Optional[Any] = qkv_bias
A : Optional[int] = hidden_dropout_prob
A : Any = attention_probs_dropout_prob
A : List[Any] = drop_path_rate
A : Tuple = hidden_act
A : Any = use_absolute_embeddings
A : Optional[int] = patch_norm
A : List[str] = layer_norm_eps
A : int = initializer_range
A : Tuple = is_training
A : int = scope
A : List[Any] = use_labels
A : List[str] = type_sequence_label_size
A : int = encoder_stride
A : List[Any] = out_features
A : str = out_indices
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : Union[str, Any] = None
if self.use_labels:
A : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A : str = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
A : List[Any] = MaskFormerSwinModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : Optional[int] = model(SCREAMING_SNAKE_CASE )
A : Tuple = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
A : Optional[int] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
A : Tuple = MaskFormerSwinBackbone(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : Union[str, Any] = model(SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(SCREAMING_SNAKE_CASE ):
A : str = ['''stem''']
A : Dict = MaskFormerSwinBackbone(config=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : str = self.prepare_config_and_inputs()
A, A, A : int = config_and_inputs
A : List[str] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A ( __snake_case , __snake_case , unittest.TestCase ):
__magic_name__ = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
__magic_name__ = {'''feature-extraction''': MaskFormerSwinModel} if is_torch_available() else {}
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Tuple = MaskFormerSwinModelTester(self )
A : int = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'''`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'''
''' `nn.DataParallel`'''
) )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
pass
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
return
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*SCREAMING_SNAKE_CASE )
@unittest.skip('''Swin does not use inputs_embeds''' )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('''Swin does not support feedforward chunking''' )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
pass
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A, A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : Optional[int] = model_class(SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , nn.Linear ) )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A, A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : List[Any] = model_class(SCREAMING_SNAKE_CASE )
A : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : Optional[Any] = [*signature.parameters.keys()]
A : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
@unittest.skip(reason='''MaskFormerSwin is only used as backbone and doesn\'t support output_attentions''' )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormerSwin is only used as an internal backbone''' )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
pass
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
A : Tuple = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
A : Any = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
A : List[str] = outputs.hidden_states
A : str = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
# Swin has a different seq_length
A : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
A : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A, A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
A : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
A : str = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A : Optional[Any] = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A, A : Any = self.model_tester.prepare_config_and_inputs_for_common()
A : Union[str, Any] = 3
A : List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
A : Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
A : Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
A : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
A : Optional[Any] = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A : Optional[Any] = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
@unittest.skip(reason='''MaskFormerSwin doesn\'t have pretrained checkpoints''' )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
pass
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A, A : str = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE ):
A : List[str] = 0
return t
def check_equivalence(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE={} ):
with torch.no_grad():
A : List[Any] = model(**SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
A : str = model(**SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).to_tuple()
def recursive_check(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if isinstance(SCREAMING_SNAKE_CASE , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
recursive_check(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE ) , set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE ) , atol=1e-5 ) , msg=(
'''Tuple and dict output are not equal. Difference:'''
F' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'
F' {torch.isnan(SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(SCREAMING_SNAKE_CASE )}. Dict has'
F' `nan`: {torch.isnan(SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(SCREAMING_SNAKE_CASE )}.'
) , )
recursive_check(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
A : Tuple = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
A : List[str] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : int = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
check_equivalence(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
A : int = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
check_equivalence(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Tuple = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Any = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
check_equivalence(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , {'''output_hidden_states''': True} )
A : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
A : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
check_equivalence(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , {'''output_hidden_states''': True} )
@require_torch
class A ( unittest.TestCase , __snake_case ):
__magic_name__ = (MaskFormerSwinBackbone,) if is_torch_available() else ()
__magic_name__ = MaskFormerSwinConfig
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Dict = MaskFormerSwinModelTester(self )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A, A : int = self.model_tester.prepare_config_and_inputs_for_common()
A : str = inputs_dict['''pixel_values'''].shape[0]
for backbone_class in self.all_model_classes:
A : Any = backbone_class(SCREAMING_SNAKE_CASE )
backbone.to(SCREAMING_SNAKE_CASE )
backbone.eval()
A : Dict = backbone(**SCREAMING_SNAKE_CASE )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , SCREAMING_SNAKE_CASE )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
A : Optional[int] = backbone(**SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.hidden_states )
            self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
A, A, A : List[Any] = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
A : Dict = backbone(**SCREAMING_SNAKE_CASE , output_attentions=SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.attentions )
| 634 | 0 |
'''simple docstring'''
def one_pence() -> int:
    return 1

def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()

def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)

def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)

def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)

def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)

def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)

def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)

def solution(x: int = 200) -> int:
    return two_pound(x)
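# Illustrative check (added; not in the original file): with the recursive
# helpers above, solution(200) == 73682 -- the number of ways to make 2 pounds
# from {1p, 2p, 5p, 10p, 20p, 50p, 1 pound, 2 pound} coins (Project Euler 31).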
if __name__ == "__main__":
print(solution(int(input().strip()))) | 721 |
'''simple docstring'''
import random
def _partition(data: list, pivot) -> tuple:
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater

def quick_select(items: list, index: int):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
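# Illustrative usage (added): quick_select returns the element that would sit at
# position `index` if the list were sorted, in expected O(n) time, e.g.
# quick_select([7, 1, 5, 3, 9], 2) == 5  (the median of the five values).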
| 124 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
__A = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """Coerce a single image, a single video or a batch of videos into a list of videos, each a list of frames."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
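# `make_batched` is called by the public preprocessing method below to normalize
# its `videos` argument before the per-frame transforms are applied.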
class _A ( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ["pixel_values"]
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BILINEAR , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 255 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> None:
super().__init__(**__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =size if size is not None else {"""shortest_edge""": 256}
__UpperCAmelCase =get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__UpperCAmelCase =get_size_dict(__SCREAMING_SNAKE_CASE , param_name="""crop_size""" )
__UpperCAmelCase =do_resize
__UpperCAmelCase =size
__UpperCAmelCase =do_center_crop
__UpperCAmelCase =crop_size
__UpperCAmelCase =resample
__UpperCAmelCase =do_rescale
__UpperCAmelCase =rescale_factor
__UpperCAmelCase =offset
__UpperCAmelCase =do_normalize
__UpperCAmelCase =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__UpperCAmelCase =image_std if image_std is not None else IMAGENET_STANDARD_STD
def _a ( self : str , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Dict[str, int] , __SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BILINEAR , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : Tuple , ) -> np.ndarray:
__UpperCAmelCase =get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE )
if "shortest_edge" in size:
__UpperCAmelCase =get_resize_output_image_size(__SCREAMING_SNAKE_CASE , size["""shortest_edge"""] , default_to_square=__SCREAMING_SNAKE_CASE )
elif "height" in size and "width" in size:
__UpperCAmelCase =(size["""height"""], size["""width"""])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _a ( self : Dict , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Dict[str, int] , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : List[str] , ) -> np.ndarray:
__UpperCAmelCase =get_size_dict(__SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(__SCREAMING_SNAKE_CASE , size=(size["""height"""], size["""width"""]) , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _a ( self : Any , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Union[int, float] , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : Any , ) -> Optional[int]:
__UpperCAmelCase =image.astype(np.floataa )
if offset:
__UpperCAmelCase =image - (scale / 2)
return rescale(__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Union[float, List[float]] , __SCREAMING_SNAKE_CASE : Union[float, List[float]] , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> np.ndarray:
return normalize(__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : ImageInput , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : PILImageResampling = None , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : float = None , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
if offset and not do_rescale:
raise ValueError("""For offset, do_rescale must also be set to True.""" )
# All transformations expect numpy arrays.
__UpperCAmelCase =to_numpy_array(__SCREAMING_SNAKE_CASE )
if do_resize:
__UpperCAmelCase =self.resize(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE )
if do_center_crop:
__UpperCAmelCase =self.center_crop(__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE )
if do_rescale:
__UpperCAmelCase =self.rescale(image=__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE , offset=__SCREAMING_SNAKE_CASE )
if do_normalize:
__UpperCAmelCase =self.normalize(image=__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =to_channel_dimension_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return image
def _a ( self : Any , __SCREAMING_SNAKE_CASE : ImageInput , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : PILImageResampling = None , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : float = None , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : ChannelDimension = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> PIL.Image.Image:
__UpperCAmelCase =do_resize if do_resize is not None else self.do_resize
__UpperCAmelCase =resample if resample is not None else self.resample
__UpperCAmelCase =do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCAmelCase =do_rescale if do_rescale is not None else self.do_rescale
__UpperCAmelCase =rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCAmelCase =offset if offset is not None else self.offset
__UpperCAmelCase =do_normalize if do_normalize is not None else self.do_normalize
__UpperCAmelCase =image_mean if image_mean is not None else self.image_mean
__UpperCAmelCase =image_std if image_std is not None else self.image_std
__UpperCAmelCase =size if size is not None else self.size
__UpperCAmelCase =get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =crop_size if crop_size is not None else self.crop_size
__UpperCAmelCase =get_size_dict(__SCREAMING_SNAKE_CASE , param_name="""crop_size""" )
if not valid_images(__SCREAMING_SNAKE_CASE ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
__UpperCAmelCase =make_batched(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =[
[
self._preprocess_image(
image=__SCREAMING_SNAKE_CASE , do_resize=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE , do_center_crop=__SCREAMING_SNAKE_CASE , crop_size=__SCREAMING_SNAKE_CASE , do_rescale=__SCREAMING_SNAKE_CASE , rescale_factor=__SCREAMING_SNAKE_CASE , offset=__SCREAMING_SNAKE_CASE , do_normalize=__SCREAMING_SNAKE_CASE , image_mean=__SCREAMING_SNAKE_CASE , image_std=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , )
for img in video
]
for video in videos
]
__UpperCAmelCase ={"""pixel_values""": videos}
return BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
| 68 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__A = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class _A ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Path , __SCREAMING_SNAKE_CASE : Union[str, None] = None , __SCREAMING_SNAKE_CASE : Union[List[str], None] = None , __SCREAMING_SNAKE_CASE : Union[str, List[str], None] = None , __SCREAMING_SNAKE_CASE : bool = True , ) -> List[str]:
__UpperCAmelCase =[file for file in os.listdir(__SCREAMING_SNAKE_CASE ) if os.path.isfile(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )]
if identifier is not None:
__UpperCAmelCase =[file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for n_ in n_identifier:
__UpperCAmelCase =[file for file in files if n_ not in file]
else:
__UpperCAmelCase =[file for file in files if n_identifier not in file]
__UpperCAmelCase =ignore_files or []
ignore_files.append("""__init__.py""" )
__UpperCAmelCase =[file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""" , __SCREAMING_SNAKE_CASE )
if only_modules:
__UpperCAmelCase =file.split(""".""" )[0]
try:
__UpperCAmelCase =getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =doctest.DocTestSuite(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =unittest.TextTestRunner().run(__SCREAMING_SNAKE_CASE )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f'''{module_identifier} is not a module.''' )
else:
__UpperCAmelCase =doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def _a ( self : Optional[Any] ) -> List[str]:
__UpperCAmelCase =Path("""src/transformers""" )
__UpperCAmelCase ="""modeling"""
__UpperCAmelCase =[
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE , ignore_files=__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase =Path("""src/transformers""" )
__UpperCAmelCase ="""tokenization"""
self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE )
def _a ( self : Optional[Any] ) -> Optional[Any]:
__UpperCAmelCase =Path("""src/transformers""" )
__UpperCAmelCase ="""configuration"""
self.analyze_directory(__SCREAMING_SNAKE_CASE , identifier=__SCREAMING_SNAKE_CASE )
def _a ( self : List[Any] ) -> Tuple:
__UpperCAmelCase =Path("""src/transformers""" )
__UpperCAmelCase =["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(__SCREAMING_SNAKE_CASE , n_identifier=__SCREAMING_SNAKE_CASE )
def _a ( self : Any ) -> Tuple:
__UpperCAmelCase =Path("""docs/source""" )
__UpperCAmelCase =["""favicon.ico"""]
self.analyze_directory(__SCREAMING_SNAKE_CASE , ignore_files=__SCREAMING_SNAKE_CASE , only_modules=__SCREAMING_SNAKE_CASE )
| 68 | 1 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        # Copy the template and swap in the ClassLabel found in `features`
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
| 720 |
"""simple docstring"""
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
A = logging.get_logger(__name__)
A = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
A = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
A = {
"""facebook/blenderbot_small-90M""": 512,
}
class _UpperCamelCase ( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
def __init__( self : Optional[Any] , snake_case : Any=None , snake_case : List[str]=None , snake_case : Optional[int]="<|endoftext|>" , snake_case : str="<|endoftext|>" , snake_case : Optional[Any]="<|endoftext|>" , snake_case : Union[str, Any]=False , snake_case : Union[str, Any]=True , **snake_case : Union[str, Any] , ) -> List[str]:
'''simple docstring'''
super().__init__(
ByteLevelBPETokenizer(
vocab=snake_case , merges=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case , ) , bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , **snake_case , )
__magic_name__ : str = add_prefix_space
def _UpperCAmelCase ( self : Optional[Any] , snake_case : Optional[int] , snake_case : Optional[int]=None ) -> int:
'''simple docstring'''
__magic_name__ : str = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _UpperCAmelCase ( self : Optional[int] , snake_case : List[int] , snake_case : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
__magic_name__ : Tuple = [self.sep_token_id]
__magic_name__ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 147 | 0 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
UpperCamelCase_ = """__DUMMY_TRANSFORMERS_USER__"""
UpperCamelCase_ = """Dummy User"""
UpperCamelCase_ = """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"""
UpperCamelCase_ = """https://hub-ci.huggingface.co"""
UpperCamelCase_ = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}"""
UpperCamelCase_ = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}"""
UpperCamelCase_ = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def _lowerCamelCase ( lowerCamelCase_: Union[str, Any] ):
'''simple docstring'''
monkeypatch.setattr(
'''huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE''' , UpperCAmelCase_ )
@pytest.fixture
def _lowerCamelCase ( lowerCamelCase_: Tuple ):
'''simple docstring'''
monkeypatch.setattr('''datasets.config.HF_ENDPOINT''' , UpperCAmelCase_ )
monkeypatch.setattr('''datasets.config.HUB_DATASETS_URL''' , UpperCAmelCase_ )
@pytest.fixture
def _lowerCamelCase ( lowerCamelCase_: Any ):
'''simple docstring'''
monkeypatch.setattr('''huggingface_hub.hf_api.HfFolder.path_token''' , UpperCAmelCase_ )
@pytest.fixture
def _lowerCamelCase ( lowerCamelCase_: Any , lowerCamelCase_: List[str] ):
'''simple docstring'''
HfFolder.save_token(UpperCAmelCase_ )
yield
HfFolder.delete_token()
@pytest.fixture(scope='''session''' )
def _lowerCamelCase ( ):
'''simple docstring'''
return HfApi(endpoint=UpperCAmelCase_ )
@pytest.fixture(scope='''session''' )
def _lowerCamelCase ( lowerCamelCase_: HfApi ):
'''simple docstring'''
A : List[Any] = HfFolder.get_token()
HfFolder.save_token(UpperCAmelCase_ )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(UpperCAmelCase_ )
@pytest.fixture
def _lowerCamelCase ( lowerCamelCase_: str ):
'''simple docstring'''
def _cleanup_repo(lowerCamelCase_: List[str] ):
hf_api.delete_repo(UpperCAmelCase_ , token=UpperCAmelCase_ , repo_type='''dataset''' )
return _cleanup_repo
@pytest.fixture
def _lowerCamelCase ( lowerCamelCase_: int ):
'''simple docstring'''
@contextmanager
def _temporary_repo(lowerCamelCase_: Union[str, Any] ):
try:
yield repo_id
finally:
cleanup_repo(UpperCAmelCase_ )
return _temporary_repo
@pytest.fixture(scope='''session''' )
def _lowerCamelCase ( lowerCamelCase_: HfApi , lowerCamelCase_: Optional[int] , lowerCamelCase_: List[Any] ):
'''simple docstring'''
A : int = f"""repo_txt_data-{int(time.time() * 10e3 )}"""
A : Optional[Any] = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(UpperCAmelCase_ , token=UpperCAmelCase_ , repo_type='''dataset''' , private=UpperCAmelCase_ )
hf_api.upload_file(
token=UpperCAmelCase_ , path_or_fileobj=str(UpperCAmelCase_ ) , path_in_repo='''data/text_data.txt''' , repo_id=UpperCAmelCase_ , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(UpperCAmelCase_ , token=UpperCAmelCase_ , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _lowerCamelCase ( lowerCamelCase_: Tuple , lowerCamelCase_: Optional[Any] , lowerCamelCase_: Any ):
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='''session''' )
def _lowerCamelCase ( lowerCamelCase_: HfApi , lowerCamelCase_: Tuple , lowerCamelCase_: Any ):
'''simple docstring'''
A : List[str] = f"""repo_zipped_txt_data-{int(time.time() * 10e3 )}"""
A : int = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(UpperCAmelCase_ , token=UpperCAmelCase_ , repo_type='''dataset''' , private=UpperCAmelCase_ )
hf_api.upload_file(
token=UpperCAmelCase_ , path_or_fileobj=str(UpperCAmelCase_ ) , path_in_repo='''data.zip''' , repo_id=UpperCAmelCase_ , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(UpperCAmelCase_ , token=UpperCAmelCase_ , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _lowerCamelCase ( lowerCamelCase_: Tuple , lowerCamelCase_: Optional[Any] , lowerCamelCase_: str ):
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='''session''' )
def _lowerCamelCase ( lowerCamelCase_: HfApi , lowerCamelCase_: Dict , lowerCamelCase_: Tuple ):
'''simple docstring'''
A : Tuple = f"""repo_zipped_img_data-{int(time.time() * 10e3 )}"""
A : Any = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(UpperCAmelCase_ , token=UpperCAmelCase_ , repo_type='''dataset''' , private=UpperCAmelCase_ )
hf_api.upload_file(
token=UpperCAmelCase_ , path_or_fileobj=str(UpperCAmelCase_ ) , path_in_repo='''data.zip''' , repo_id=UpperCAmelCase_ , repo_type='''dataset''' , )
yield repo_id
try:
hf_api.delete_repo(UpperCAmelCase_ , token=UpperCAmelCase_ , repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _lowerCamelCase ( lowerCamelCase_: List[Any] , lowerCamelCase_: Tuple , lowerCamelCase_: List[Any] ):
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_ | 256 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Returns the sum of all the multiples of 3 or 5 below n."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        elif a % 15 == 0:  # unreachable: a % 15 == 0 implies a % 3 == 0 above
            result -= a
        a += 1
    return result
if __name__ == "__main__":
print(f'''{solution() = }''')
| 13 | 0 |
def is_arithmetic_series(series: list) -> bool:
    """Checks whether the input series is an arithmetic series."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True

def arithmetic_mean(series: list) -> float:
    """Returns the arithmetic mean of the series."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
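# Illustrative usage (added): [2, 4, 6] has a common difference of 2, so
# is_arithmetic_series([2, 4, 6]) is True and arithmetic_mean([2, 4, 6]) == 4.0.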
if __name__ == "__main__":
import doctest
doctest.testmod()
| 375 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class _lowerCamelCase ( PretrainedConfig ):
    """simple docstring"""
    model_type = "mctct"
def __init__( self : List[Any] , snake_case : Optional[Any]=8065 , snake_case : Optional[int]=1536 , snake_case : Any=36 , snake_case : List[str]=6144 , snake_case : Dict=4 , snake_case : str=384 , snake_case : List[str]=920 , snake_case : Dict=1E-5 , snake_case : Union[str, Any]=0.3 , snake_case : Optional[Any]="relu" , snake_case : str=0.02 , snake_case : Optional[int]=0.3 , snake_case : int=0.3 , snake_case : Any=1 , snake_case : int=0 , snake_case : Union[str, Any]=2 , snake_case : List[Any]=1 , snake_case : Dict=0.3 , snake_case : int=1 , snake_case : Optional[int]=(7,) , snake_case : List[Any]=(3,) , snake_case : Optional[int]=80 , snake_case : List[str]=1 , snake_case : int=None , snake_case : List[str]="sum" , snake_case : Tuple=False , **snake_case : List[str] , ):
super().__init__(**snake_case , pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case )
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = intermediate_size
__UpperCamelCase = num_attention_heads
__UpperCamelCase = attention_head_dim
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = layerdrop
__UpperCamelCase = hidden_act
__UpperCamelCase = initializer_range
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = pad_token_id
__UpperCamelCase = bos_token_id
__UpperCamelCase = eos_token_id
__UpperCamelCase = conv_glu_dim
__UpperCamelCase = conv_dropout
__UpperCamelCase = num_conv_layers
__UpperCamelCase = input_feat_per_channel
__UpperCamelCase = input_channels
__UpperCamelCase = conv_channels
__UpperCamelCase = ctc_loss_reduction
__UpperCamelCase = ctc_zero_infinity
# prevents config testing fail with exporting to json
__UpperCamelCase = list(snake_case )
__UpperCamelCase = list(snake_case )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '''
F"but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, "
F"`config.num_conv_layers = {self.num_conv_layers}`." )
| 375 | 1 |
'''simple docstring'''
def remove_digit(num: int) -> int:
    """Returns the biggest number obtainable by removing exactly one digit."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )
if __name__ == "__main__":
__import__('doctest').testmod()
| 22 | '''simple docstring'''
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    """Build the highway following the parameters given."""
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway

def get_distance(highway_now: list, car_index: int) -> int:
    """Get the distance between a car (at index car_index) and the next car."""
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)

def update(highway_now: list, probability: float, max_speed: int) -> list:
    """Update the speed of the cars."""
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway

def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    """The main function; it will simulate the evolution of the highway."""
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
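# Minimal illustrative run (added; parameter values are arbitrary): build a
# 100-cell highway with a car every 10 cells at initial speed 3, then apply
# 20 updates in which drivers brake randomly with probability 0.1:
# evolution = simulate(construct_highway(100, 10, 3), 20, 0.1, 5)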
if __name__ == "__main__":
import doctest
doctest.testmod()
| 262 | 0 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar("_T")

class QueueByTwoStacks(Generic[_T]):
    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()
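# Illustrative usage (added): FIFO order is preserved across the two stacks.
# q = QueueByTwoStacks([1, 2, 3]); q.put(4)
# q.get() == 1, then q.get() == 2, and so on.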
if __name__ == "__main__":
from doctest import testmod
testmod() | 711 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    model_type = """gpt_neox"""
def __init__( self : List[Any] , __lowercase : Union[str, Any]=5_04_32 , __lowercase : int=61_44 , __lowercase : Tuple=44 , __lowercase : List[str]=64 , __lowercase : str=2_45_76 , __lowercase : Dict="gelu" , __lowercase : Tuple=0.25 , __lowercase : Tuple=1_00_00 , __lowercase : Tuple=0.0 , __lowercase : str=0.0 , __lowercase : List[Any]=0.1 , __lowercase : Dict=20_48 , __lowercase : Any=0.02 , __lowercase : Dict=1e-5 , __lowercase : List[Any]=True , __lowercase : str=0 , __lowercase : Optional[Any]=2 , __lowercase : Tuple=False , __lowercase : List[Any]=True , __lowercase : Optional[Any]=None , **__lowercase : Any , ) -> Dict:
super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =max_position_embeddings
SCREAMING_SNAKE_CASE__ : Any =hidden_size
SCREAMING_SNAKE_CASE__ : str =num_hidden_layers
SCREAMING_SNAKE_CASE__ : Any =num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] =intermediate_size
SCREAMING_SNAKE_CASE__ : Dict =hidden_act
SCREAMING_SNAKE_CASE__ : str =rotary_pct
SCREAMING_SNAKE_CASE__ : Optional[Any] =rotary_emb_base
SCREAMING_SNAKE_CASE__ : List[Any] =attention_dropout
SCREAMING_SNAKE_CASE__ : List[Any] =hidden_dropout
SCREAMING_SNAKE_CASE__ : str =classifier_dropout
SCREAMING_SNAKE_CASE__ : Any =initializer_range
SCREAMING_SNAKE_CASE__ : Dict =layer_norm_eps
SCREAMING_SNAKE_CASE__ : Any =use_cache
SCREAMING_SNAKE_CASE__ : Tuple =tie_word_embeddings
SCREAMING_SNAKE_CASE__ : Tuple =use_parallel_residual
SCREAMING_SNAKE_CASE__ : Union[str, Any] =rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
'''The hidden size is not divisble by the number of attention heads! Make sure to update them!''' )
def __magic_name__ ( self : Optional[Any] ) -> Optional[Any]:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __lowercase ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
F"got {self.rope_scaling}" )
SCREAMING_SNAKE_CASE__ : int =self.rope_scaling.get('''type''' , __lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =self.rope_scaling.get('''factor''' , __lowercase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(__lowercase , __lowercase ) or rope_scaling_factor <= 1.0:
raise ValueError(F"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" ) | 665 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["feature_extraction_yolos"] = ['''YolosFeatureExtractor''']
_import_structure["image_processing_yolos"] = ['''YolosImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_yolos"] = [
'''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''YolosForObjectDetection''',
'''YolosModel''',
'''YolosPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 562 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_gpt_sw3"] = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
__A : Any = logging.get_logger(__name__)
__A : Optional[Any] = {'''vocab_file''': '''spiece.model'''}
__A : Any = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
__A : str = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
__A : Optional[Any] = '''▁'''
class _UpperCAmelCase ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : Any , A : str , A : Dict="</s>" , A : Union[str, Any]="<unk>" , A : List[str]="<pad>" , A : List[str]=1_00 , A : Optional[Any]=None , A : Optional[Dict[str, Any]] = None , A : str=True , **A : Tuple , ) -> None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
lowercase_ : Dict = [F'''<extra_id_{i}>''' for i in range(A )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
lowercase_ : Optional[int] = len(set(filter(lambda A : bool('''extra_id''' in str(A ) ) , A ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
if legacy:
logger.warning_once(
F'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' )
lowercase_ : Tuple = legacy
lowercase_ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=A , unk_token=A , pad_token=A , extra_ids=A , additional_special_tokens=A , sp_model_kwargs=self.sp_model_kwargs , legacy=A , **A , )
lowercase_ : str = vocab_file
lowercase_ : Union[str, Any] = extra_ids
lowercase_ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
@staticmethod
def A ( A : Dict , A : int , A : List[Any] ) -> Optional[Any]:
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
lowercase_ : Tuple = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , A , )
return max_model_length
@property
def A ( self : List[Any] ) -> Optional[int]:
return self.sp_model.get_piece_size() + self._extra_ids
def A ( self : Optional[int] ) -> List[Any]:
lowercase_ : Tuple = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A ( self : Any , A : List[int] , A : Optional[List[int]] = None , A : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(A )) + [1]
return ([0] * len(A )) + [1] + ([0] * len(A )) + [1]
def A ( self : str ) -> Any:
return list(
set(filter(lambda A : bool(re.search(R'''<extra_id_\d+>''' , A ) ) is not None , self.additional_special_tokens ) ) )
def A ( self : Optional[int] ) -> Optional[int]:
return [self._convert_token_to_id(A ) for token in self.get_sentinel_tokens()]
def A ( self : Dict , A : List[int] ) -> List[int]:
if len(A ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
''' eos tokens being added.''' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def A ( self : Dict , A : List[int] , A : Optional[List[int]] = None ) -> List[int]:
lowercase_ : Optional[Any] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def A ( self : Optional[Any] , A : List[int] , A : Optional[List[int]] = None ) -> List[int]:
lowercase_ : Any = self._add_eos_if_not_present(A )
if token_ids_a is None:
return token_ids_a
else:
lowercase_ : List[Any] = self._add_eos_if_not_present(A )
return token_ids_a + token_ids_a
def __getstate__( self : int ) -> Dict:
lowercase_ : List[Any] = self.__dict__.copy()
lowercase_ : Dict = None
return state
def __setstate__( self : Optional[int] , A : Optional[Any] ) -> Any:
lowercase_ : Tuple = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase_ : str = {}
lowercase_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A ( self : List[Any] , A : "TextInput" , **A : Optional[int] ) -> List[str]:
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
lowercase_ : str = SPIECE_UNDERLINE + text.replace(A , ''' ''' )
return super().tokenize(A , **A )
def A ( self : Dict , A : List[str] , **A : Tuple ) -> Optional[int]:
if not self.legacy:
lowercase_ : int = text.startswith(A )
if is_first:
lowercase_ : Tuple = text[1:]
lowercase_ : List[str] = self.sp_model.encode(A , out_type=A )
if not self.legacy and not is_first and not text.startswith(''' ''' ) and tokens[0].startswith(A ):
lowercase_ : Any = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def A ( self : Any , A : Union[str, Any] ) -> Optional[Any]:
if token.startswith('''<extra_id_''' ):
lowercase_ : Optional[Any] = re.match(R'''<extra_id_(\d+)>''' , A )
lowercase_ : List[str] = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(A )
def A ( self : List[Any] , A : Dict ) -> Union[str, Any]:
if index < self.sp_model.get_piece_size():
lowercase_ : Optional[int] = self.sp_model.IdToPiece(A )
else:
lowercase_ : List[str] = F'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def A ( self : Optional[int] , A : List[str] ) -> Tuple:
lowercase_ : Optional[int] = []
lowercase_ : Tuple = ''''''
lowercase_ : Union[str, Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A ) + token
lowercase_ : Optional[Any] = True
lowercase_ : Optional[int] = []
else:
current_sub_tokens.append(A )
lowercase_ : Optional[int] = False
out_string += self.sp_model.decode(A )
return out_string.strip()
def A ( self : int , A : str , A : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ : Optional[int] = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , '''wb''' ) as fi:
lowercase_ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
| 713 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCAmelCase ( SchedulerCommonTest ):
    scheduler_classes = (DDPMScheduler,)
def A ( self : int , **A : Dict ) -> Dict:
config = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**A )
return config
def A ( self : Tuple ) -> List[str]:
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=A )
def A ( self : str ) -> Dict:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=A , beta_end=A )
def A ( self : Union[str, Any] ) -> Optional[int]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=A )
def A ( self : Tuple ) -> Union[str, Any]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=A )
def A ( self : Tuple ) -> Union[str, Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=A )
def A ( self : List[str] ) -> List[str]:
self.check_over_configs(thresholding=A )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=A , prediction_type=A , sample_max_value=A , )
def A ( self : Optional[Any] ) -> List[str]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=A )
def A ( self : Optional[Any] ) -> Optional[int]:
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=A )
def A ( self : Optional[int] ) -> List[Any]:
lowercase_ : Any = self.scheduler_classes[0]
lowercase_ : Dict = self.get_scheduler_config()
lowercase_ : Any = scheduler_class(**A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.02 ) ) < 1e-5
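# Note (added): the reference values above are the "fixed_small" posterior
# variances beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) at t = 0, 487, 999.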
def A ( self : Dict ) -> List[str]:
lowercase_ : Union[str, Any] = self.scheduler_classes[0]
lowercase_ : Optional[Any] = self.get_scheduler_config()
lowercase_ : Tuple = scheduler_class(**A )
lowercase_ : Tuple = len(A )
lowercase_ : Tuple = self.dummy_model()
lowercase_ : List[Any] = self.dummy_sample_deter
lowercase_ : Tuple = torch.manual_seed(0 )
for t in reversed(range(A ) ):
# 1. predict noise residual
lowercase_ : int = model(A , A )
# 2. predict previous mean of sample x_t-1
lowercase_ : List[Any] = scheduler.step(A , A , A , generator=A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowercase_ : Tuple = pred_prev_sample
lowercase_ : str = torch.sum(torch.abs(A ) )
lowercase_ : int = torch.mean(torch.abs(A ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def A ( self : int ) -> str:
lowercase_ : Dict = self.scheduler_classes[0]
lowercase_ : int = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowercase_ : Any = scheduler_class(**A )
lowercase_ : Any = len(A )
lowercase_ : Optional[int] = self.dummy_model()
lowercase_ : Tuple = self.dummy_sample_deter
lowercase_ : List[str] = torch.manual_seed(0 )
for t in reversed(range(A ) ):
# 1. predict noise residual
lowercase_ : str = model(A , A )
# 2. predict previous mean of sample x_t-1
lowercase_ : Any = scheduler.step(A , A , A , generator=A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowercase_ : str = pred_prev_sample
lowercase_ : Any = torch.sum(torch.abs(A ) )
lowercase_ : int = torch.mean(torch.abs(A ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def A ( self : str ) -> int:
lowercase_ : Union[str, Any] = self.scheduler_classes[0]
lowercase_ : Tuple = self.get_scheduler_config()
lowercase_ : Any = scheduler_class(**A )
lowercase_ : Union[str, Any] = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=A )
lowercase_ : Any = scheduler.timesteps
for i, timestep in enumerate(A ):
if i == len(A ) - 1:
lowercase_ : List[str] = -1
else:
lowercase_ : Dict = timesteps[i + 1]
lowercase_ : int = scheduler.previous_timestep(A )
lowercase_ : Any = prev_t.item()
self.assertEqual(A , A )
def A ( self : Union[str, Any] ) -> Dict:
lowercase_ : List[Any] = self.scheduler_classes[0]
lowercase_ : Union[str, Any] = self.get_scheduler_config()
lowercase_ : Tuple = scheduler_class(**A )
lowercase_ : Optional[Any] = [1_00, 87, 50, 51, 0]
with self.assertRaises(A , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=A )
def A ( self : List[Any] ) -> Optional[Any]:
lowercase_ : Any = self.scheduler_classes[0]
lowercase_ : Any = self.get_scheduler_config()
lowercase_ : List[Any] = scheduler_class(**A )
lowercase_ : str = [1_00, 87, 50, 1, 0]
lowercase_ : str = len(A )
with self.assertRaises(A , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=A , timesteps=A )
def A ( self : Optional[Any] ) -> Dict:
lowercase_ : List[str] = self.scheduler_classes[0]
lowercase_ : Optional[Any] = self.get_scheduler_config()
lowercase_ : Union[str, Any] = scheduler_class(**A )
lowercase_ : str = [scheduler.config.num_train_timesteps]
with self.assertRaises(
A , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=A )
| 141 | 0 |
"""simple docstring"""
import numpy as np
SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]

class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the pair of numbers that represents the given letter in the polybius square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter corresponding to the position [index1, index2] in the polybius square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        """Return the encoded version of the message according to the bifid cipher."""
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter
        return encoded_message

    def decode(self, message: str) -> str:
        """Return the decoded version of the message according to the bifid cipher."""
        message = message.lower()
        message.replace(" ", "")
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter
        return decoded_message
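# Illustrative round trip (added): BifidCipher().decode(BifidCipher().encode("testmessage"))
# returns "testmessage"; note that "j" is folded onto "i" by the 5x5 square.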
| 19 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class A( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = BlipImageProcessor()
lowerCamelCase_ = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
lowerCamelCase_ = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' )
lowerCamelCase_ = InstructBlipProcessor(A_ , A_ , A_ )
processor.save_pretrained(self.tmpdirname )
def a__ ( self : Optional[int] , **A_ : Optional[int] ) -> Dict:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **A_ ).tokenizer
def a__ ( self : List[str] , **A_ : str ) -> Optional[Any]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **A_ ).image_processor
def a__ ( self : Tuple , **A_ : Any ) -> Optional[int]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **A_ ).qformer_tokenizer
def a__ ( self : str ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a__ ( self : Dict ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase_ = [Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCamelCase_ = self.get_image_processor(do_normalize=A_ , padding_value=1.0 )
lowerCamelCase_ = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=A_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A_ )
self.assertIsInstance(processor.qformer_tokenizer , A_ )
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_qformer_tokenizer()
lowerCamelCase_ = InstructBlipProcessor(
tokenizer=A_ , image_processor=A_ , qformer_tokenizer=A_ )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(A_ , return_tensors='np' )
lowerCamelCase_ = processor(images=A_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def a__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_qformer_tokenizer()
lowerCamelCase_ = InstructBlipProcessor(
tokenizer=A_ , image_processor=A_ , qformer_tokenizer=A_ )
lowerCamelCase_ = 'lower newer'
lowerCamelCase_ = processor(text=A_ )
lowerCamelCase_ = tokenizer(A_ , return_token_type_ids=A_ )
lowerCamelCase_ = qformer_tokenizer(A_ , return_token_type_ids=A_ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_qformer_tokenizer()
lowerCamelCase_ = InstructBlipProcessor(
tokenizer=A_ , image_processor=A_ , qformer_tokenizer=A_ )
lowerCamelCase_ = 'lower newer'
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=A_ , images=A_ )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def a__ ( self : Optional[int] ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_qformer_tokenizer()
lowerCamelCase_ = InstructBlipProcessor(
tokenizer=A_ , image_processor=A_ , qformer_tokenizer=A_ )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(A_ )
lowerCamelCase_ = tokenizer.batch_decode(A_ )
self.assertListEqual(A_ , A_ )
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_qformer_tokenizer()
lowerCamelCase_ = InstructBlipProcessor(
tokenizer=A_ , image_processor=A_ , qformer_tokenizer=A_ )
lowerCamelCase_ = 'lower newer'
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=A_ , images=A_ )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
| 70 | 0 |
"""simple docstring"""
__A : Tuple = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
__A : Optional[int] = [{'type': 'code', 'content': INSTALL_CONTENT}]
__A : List[str] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 281 |
"""simple docstring"""
def snake_case__ ( _lowerCamelCase, _lowerCamelCase ) ->int:
"""simple docstring"""
return abs(_lowerCamelCase ) if a == 0 else greatest_common_divisor(b % a, _lowerCamelCase )
def snake_case__ ( _lowerCamelCase, _lowerCamelCase ) ->int:
"""simple docstring"""
while y: # --> when y=0 then loop will terminate and return x as final GCD.
__lowercase ,__lowercase : Any = y, x % y
return abs(_lowerCamelCase )
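# Quick check (hedged): `greatest_common_divisor` and `gcd_by_iterative` are the names
# main() below uses for the two obfuscated helpers above; both agree, e.g.
#   greatest_common_divisor(24, 40) == 8
#   gcd_by_iterative(24, 40) == 8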
def snake_case__ ( ) ->Optional[int]:
"""simple docstring"""
try:
__lowercase : Optional[int] = input("Enter two integers separated by comma (,): " ).split("," )
__lowercase : Optional[Any] = int(nums[0] )
__lowercase : str = int(nums[1] )
print(
F'greatest_common_divisor({num_a}, {num_a}) = '
F'{greatest_common_divisor(_lowerCamelCase, _lowerCamelCase )}' )
print(F'By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(_lowerCamelCase, _lowerCamelCase )}' )
except (IndexError, UnboundLocalError, ValueError):
print("Wrong input" )
if __name__ == "__main__":
main()
| 281 | 1 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def UpperCamelCase ( __magic_name__ : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Any=1E-1_2 ) -> str:
"""simple docstring"""
lowercase__ = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__magic_name__ , axis=1 ) , a_min=__magic_name__ ) ).T
lowercase__ = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__magic_name__ , axis=1 ) , a_min=__magic_name__ ) ).T
return jnp.matmul(__magic_name__ , norm_emb_a.T )
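# Shape sketch (hedged): the helper above row-normalizes both embedding batches and
# returns their cosine-similarity matrix, e.g. emb_1 of shape (4, 768) against
# emb_2 of shape (17, 768) yields a (4, 17) matrix with entries in [-1, 1].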
class A ( nn.Module ):
'''simple docstring'''
A__ = 42
A__ = jnp.floataa
def lowerCamelCase__ (self : Dict ) -> Dict:
"""simple docstring"""
lowercase__ = FlaxCLIPVisionModule(self.config.vision_config )
lowercase__ = nn.Dense(self.config.projection_dim , use_bias=_UpperCAmelCase , dtype=self.dtype )
lowercase__ = self.param("""concept_embeds""" , jax.nn.initializers.ones , (17, self.config.projection_dim) )
lowercase__ = self.param(
"""special_care_embeds""" , jax.nn.initializers.ones , (3, self.config.projection_dim) )
lowercase__ = self.param("""concept_embeds_weights""" , jax.nn.initializers.ones , (17,) )
lowercase__ = self.param("""special_care_embeds_weights""" , jax.nn.initializers.ones , (3,) )
def __call__(self : List[str] , _UpperCAmelCase : List[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = self.vision_model(_UpperCAmelCase )[1]
lowercase__ = self.visual_projection(_UpperCAmelCase )
lowercase__ = jax_cosine_distance(_UpperCAmelCase , self.special_care_embeds )
lowercase__ = jax_cosine_distance(_UpperCAmelCase , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
lowercase__ = 0.0
lowercase__ = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
lowercase__ = jnp.round(_UpperCAmelCase , 3 )
lowercase__ = jnp.any(special_scores > 0 , axis=1 , keepdims=_UpperCAmelCase )
# Use a lower threshold if an image has any special care concept
lowercase__ = is_special_care * 0.01
lowercase__ = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
lowercase__ = jnp.round(_UpperCAmelCase , 3 )
lowercase__ = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class A ( UpperCAmelCase__ ):
'''simple docstring'''
A__ = CLIPConfig
A__ = '''clip_input'''
A__ = FlaxStableDiffusionSafetyCheckerModule
    def __init__(self : List[str] , _UpperCAmelCase : CLIPConfig , _UpperCAmelCase : Optional[Tuple] = None , _UpperCAmelCase : int = 0 , _UpperCAmelCase : jnp.dtype = jnp.floataa , _do_init : bool = True , **_UpperCAmelCase : str , ) -> Dict:
"""simple docstring"""
if input_shape is None:
lowercase__ = (1, 224, 224, 3)
lowercase__ = self.module_class(config=_UpperCAmelCase , dtype=_UpperCAmelCase , **_UpperCAmelCase )
super().__init__(_UpperCAmelCase , _UpperCAmelCase , input_shape=_UpperCAmelCase , seed=_UpperCAmelCase , dtype=_UpperCAmelCase , _do_init=_do_init )
def lowerCamelCase__ (self : int , _UpperCAmelCase : jax.random.KeyArray , _UpperCAmelCase : Tuple , _UpperCAmelCase : FrozenDict = None ) -> FrozenDict:
"""simple docstring"""
lowercase__ = jax.random.normal(_UpperCAmelCase , _UpperCAmelCase )
lowercase__ , lowercase__ = jax.random.split(_UpperCAmelCase )
lowercase__ = {"""params""": params_rng, """dropout""": dropout_rng}
lowercase__ = self.module.init(_UpperCAmelCase , _UpperCAmelCase )["""params"""]
return random_params
def __call__(self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : dict = None , ) -> Dict:
"""simple docstring"""
lowercase__ = jnp.transpose(_UpperCAmelCase , (0, 2, 3, 1) )
return self.module.apply(
{"""params""": params or self.params} , jnp.array(_UpperCAmelCase , dtype=jnp.floataa ) , rngs={} , )
| 15 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def UpperCamelCase ( __magic_name__ : Dict , __magic_name__ : List[str]=7 ) -> Dict:
"""simple docstring"""
lowercase__ = None
if token is not None:
lowercase__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
# The id of a workflow (not of a workflow run)
lowercase__ = """636036"""
lowercase__ = f'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += f'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
lowercase__ = requests.get(__magic_name__ , headers=__magic_name__ ).json()
return result["workflow_runs"]
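# Request sketch (hedged, values taken from the code above): with the default num_runs=7
# this issues
#   GET https://api.github.com/repos/huggingface/transformers/actions/workflows/636036/runs
#       ?branch=main&event=schedule&exclude_pull_requests=true&per_page=7
# with headers {"Accept": "application/vnd.github+json", "Authorization": "Bearer <token>"}
# when a token is supplied, and returns only the "workflow_runs" list from the JSON body.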
def UpperCamelCase ( __magic_name__ : str ) -> Dict:
"""simple docstring"""
lowercase__ = get_daily_ci_runs(__magic_name__ )
lowercase__ = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
lowercase__ = workflow_run["""id"""]
break
return workflow_run_id
def UpperCamelCase ( __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : Dict ) -> str:
"""simple docstring"""
lowercase__ = get_last_daily_ci_runs(__magic_name__ )
if workflow_run_id is not None:
lowercase__ = get_artifacts_links(worflow_run_id=__magic_name__ , token=__magic_name__ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
lowercase__ = artifacts_links[artifact_name]
download_artifact(
artifact_name=__magic_name__ , artifact_url=__magic_name__ , output_dir=__magic_name__ , token=__magic_name__ )
def UpperCamelCase ( __magic_name__ : List[str] , __magic_name__ : str , __magic_name__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
get_last_daily_ci_artifacts(__magic_name__ , __magic_name__ , __magic_name__ )
lowercase__ = {}
for artifact_name in artifact_names:
lowercase__ = os.path.join(__magic_name__ , f'''{artifact_name}.zip''' )
if os.path.isfile(__magic_name__ ):
lowercase__ = {}
with zipfile.ZipFile(__magic_name__ ) as z:
for filename in z.namelist():
if not os.path.isdir(__magic_name__ ):
# read the file
with z.open(__magic_name__ ) as f:
lowercase__ = f.read().decode("""UTF-8""" )
return results
| 15 | 1 |
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def _lowerCAmelCase ( __snake_case : Optional[Any] , __snake_case : Any , __snake_case : List[str] , __snake_case : int ) -> List[Any]:
if isinstance(__snake_case , __snake_case ):
__A : Dict = np.full((len(__snake_case ), sequence_length, 2) , __snake_case )
else:
__A : Dict = np.full((len(__snake_case ), sequence_length) , __snake_case )
for i, tensor in enumerate(__snake_case ):
if padding_side == "right":
if isinstance(__snake_case , __snake_case ):
__A : str = tensor[:sequence_length]
else:
__A : Union[str, Any] = tensor[:sequence_length]
else:
if isinstance(__snake_case , __snake_case ):
__A : str = tensor[:sequence_length]
else:
__A : str = tensor[:sequence_length]
return out_tensor.tolist()
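# Worked example (hedged; this reflects the intent of the original, un-obfuscated
# padding helper): right-padding two label sequences to sequence_length=4 with
# padding_index=-100 yields
#   [[1, 2, -100, -100], [3, -100, -100, -100]]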
def _lowerCAmelCase ( __snake_case : List[Any] ) -> str:
__A : Union[str, Any] = ord(__snake_case )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26):
return True
__A : Any = unicodedata.category(__snake_case )
if cat.startswith('P' ):
return True
return False
@dataclass
class SCREAMING_SNAKE_CASE (__lowerCAmelCase ):
lowerCAmelCase = 42
lowerCAmelCase = True
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = -100
lowerCAmelCase = "pt"
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
import torch
__A : Tuple = '''label''' if '''label''' in features[0].keys() else '''labels'''
__A : Optional[Any] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
__A : Optional[int] = self.tokenizer.pad(
lowerCamelCase__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' if labels is None else None , )
if labels is None:
return batch
__A : Dict = torch.tensor(batch['entity_ids']).shape[1]
__A : Union[str, Any] = self.tokenizer.padding_side
if padding_side == "right":
__A : Optional[int] = [
list(lowerCamelCase__) + [self.label_pad_token_id] * (sequence_length - len(lowerCamelCase__)) for label in labels
]
else:
__A : int = [
[self.label_pad_token_id] * (sequence_length - len(lowerCamelCase__)) + list(lowerCamelCase__) for label in labels
]
__A : Union[str, Any] = [feature['''ner_tags'''] for feature in features]
__A : List[str] = padding_tensor(lowerCamelCase__ , -1 , lowerCamelCase__ , lowerCamelCase__)
__A : Union[str, Any] = [feature['''original_entity_spans'''] for feature in features]
__A : Any = padding_tensor(lowerCamelCase__ , (-1, -1) , lowerCamelCase__ , lowerCamelCase__)
__A : str = {k: torch.tensor(lowerCamelCase__ , dtype=torch.intaa) for k, v in batch.items()}
return batch
| 718 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
lowercase__ : Dict = logging.get_logger(__name__)
lowercase__ : Union[str, Any] = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def _lowerCAmelCase ( __snake_case : str , __snake_case : str ) -> str:
__A : int = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 10_24,
'hidden_size': 7_68,
'max_length': 5_12,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 10_24,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1e-5,
'token_type_vocab_size': 2,
}
__A : Any = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
__A : Any = BERTEncoder(
attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=__snake_case , output_all_encodings=__snake_case , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , __snake_case ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__A : Optional[int] = 'openwebtext_ccnews_stories_books_cased'
# Specify download folder to Gluonnlp's vocab
__A : int = os.path.join(get_home_dir() , 'models' )
__A : Optional[int] = _load_vocab(__snake_case , __snake_case , __snake_case , cls=__snake_case )
__A : List[Any] = nlp.model.BERTModel(
__snake_case , len(__snake_case ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=__snake_case , use_token_type_embed=__snake_case , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=__snake_case , use_decoder=__snake_case , )
original_bort.load_parameters(__snake_case , cast_dtype=__snake_case , ignore_extra=__snake_case )
__A : Any = original_bort._collect_params_with_prefix()
# Build our config 🤗
__A : Dict = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
'vocab_size': len(__snake_case ),
}
__A : Optional[int] = BertConfig.from_dict(__snake_case )
__A : Any = BertForMaskedLM(__snake_case )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(__snake_case : List[str] ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(__snake_case : Union[str, Any] , __snake_case : int ):
__A : str = hf_param.shape
__A : int = to_torch(params[gluon_param] )
__A : Any = gluon_param.shape
assert (
shape_hf == shape_gluon
), f'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'
return gluon_param
__A : Tuple = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' )
__A : Union[str, Any] = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' )
__A : Any = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' )
__A : List[str] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
__A : Optional[int] = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
__A : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
__A : BertSelfAttention = layer.attention.self
__A : Optional[Any] = check_and_map_params(
self_attn.key.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' )
__A : Optional[int] = check_and_map_params(
self_attn.key.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' )
__A : List[Any] = check_and_map_params(
self_attn.query.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' )
__A : Optional[Any] = check_and_map_params(
self_attn.query.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' )
__A : Optional[Any] = check_and_map_params(
self_attn.value.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' )
__A : str = check_and_map_params(
self_attn.value.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' )
# self attention output
__A : BertSelfOutput = layer.attention.output
__A : Optional[Any] = check_and_map_params(
self_output.dense.bias , f'encoder.transformer_cells.{i}.proj.bias' )
__A : Union[str, Any] = check_and_map_params(
self_output.dense.weight , f'encoder.transformer_cells.{i}.proj.weight' )
__A : Optional[Any] = check_and_map_params(
self_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.layer_norm.beta' )
__A : Optional[Any] = check_and_map_params(
self_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.layer_norm.gamma' )
# intermediate
__A : BertIntermediate = layer.intermediate
__A : Optional[int] = check_and_map_params(
intermediate.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_1.bias' )
__A : str = check_and_map_params(
intermediate.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_1.weight' )
# output
__A : BertOutput = layer.output
__A : List[str] = check_and_map_params(
bert_output.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_2.bias' )
__A : Union[str, Any] = check_and_map_params(
bert_output.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_2.weight' )
__A : List[Any] = check_and_map_params(
bert_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.ffn.layer_norm.beta' )
__A : Any = check_and_map_params(
bert_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
__A : List[Any] = RobertaTokenizer.from_pretrained('roberta-base' )
__A : int = tokenizer.encode_plus(__snake_case )['input_ids']
# Get gluon output
__A : List[Any] = mx.nd.array([input_ids] )
__A : Optional[int] = original_bort(inputs=__snake_case , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(__snake_case )
__A : List[Any] = BertModel.from_pretrained(__snake_case )
hf_bort_model.eval()
__A : Dict = tokenizer.encode_plus(__snake_case , return_tensors='pt' )
__A : List[Any] = hf_bort_model(**__snake_case )[0]
__A : Tuple = output_gluon[0].asnumpy()
__A : Any = output_hf[0].detach().numpy()
__A : int = np.max(np.abs(hf_layer - gluon_layer ) ).item()
__A : str = np.allclose(__snake_case , __snake_case , atol=1e-3 )
if success:
        print('✔️ Both models output the same tensors' )
else:
        print('❌ The models do **NOT** output the same tensors' )
print('Absolute difference is:' , __snake_case )
if __name__ == "__main__":
lowercase__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowercase__ : List[Any] = parser.parse_args()
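    # Example invocation (hedged; script name and paths are hypothetical):
    #   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
    #       --bort_checkpoint_path ./bort.params --pytorch_dump_folder_path ./bort-pt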
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 338 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->Optional[int]:
if "model" in orig_key:
_lowerCamelCase : List[str] = orig_key.replace('''model.''' , '''''' )
if "norm1" in orig_key:
_lowerCamelCase : List[str] = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
if "norm2" in orig_key:
_lowerCamelCase : Tuple = orig_key.replace('''norm2''' , '''output.LayerNorm''' )
if "norm" in orig_key:
_lowerCamelCase : List[Any] = orig_key.replace('''norm''' , '''LayerNorm''' )
if "transformer" in orig_key:
_lowerCamelCase : Optional[Any] = orig_key.split('''.''' )[0].split('''_''' )[-1]
_lowerCamelCase : Any = orig_key.replace(F'''transformer_{layer_num}''' , F'''encoder.layer.{layer_num}''' )
if "mha.attn" in orig_key:
_lowerCamelCase : Optional[Any] = orig_key.replace('''mha.attn''' , '''attention.self''' )
if "mha" in orig_key:
_lowerCamelCase : str = orig_key.replace('''mha''' , '''attention''' )
if "W_q" in orig_key:
_lowerCamelCase : int = orig_key.replace('''W_q''' , '''self.query''' )
if "W_k" in orig_key:
_lowerCamelCase : Dict = orig_key.replace('''W_k''' , '''self.key''' )
if "W_v" in orig_key:
_lowerCamelCase : Union[str, Any] = orig_key.replace('''W_v''' , '''self.value''' )
if "ff1" in orig_key:
_lowerCamelCase : int = orig_key.replace('''ff1''' , '''intermediate.dense''' )
if "ff2" in orig_key:
_lowerCamelCase : int = orig_key.replace('''ff2''' , '''output.dense''' )
if "ff" in orig_key:
_lowerCamelCase : Any = orig_key.replace('''ff''' , '''output.dense''' )
if "mlm_class" in orig_key:
_lowerCamelCase : Optional[Any] = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
if "mlm" in orig_key:
_lowerCamelCase : List[Any] = orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
if "cls" not in orig_key:
_lowerCamelCase : Dict = 'yoso.' + orig_key
return orig_key
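# Worked example (hedged, assuming a checkpoint key of this shape): tracing one key
# through the renames above:
#   "model.transformer_0.mha.W_q.weight"
#   -> strip "model."            -> "transformer_0.mha.W_q.weight"
#   -> transformer_{n} rewrite   -> "encoder.layer.0.mha.W_q.weight"
#   -> "mha" -> "attention"      -> "encoder.layer.0.attention.W_q.weight"
#   -> "W_q" -> "self.query"     -> "encoder.layer.0.attention.self.query.weight"
#   -> prefix "yoso."            -> "yoso.encoder.layer.0.attention.self.query.weight"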
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->List[Any]:
for key in orig_state_dict.copy().keys():
_lowerCamelCase : str = orig_state_dict.pop(_A )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
_lowerCamelCase : Union[str, Any] = val
_lowerCamelCase : Optional[Any] = orig_state_dict['cls.predictions.decoder.bias']
_lowerCamelCase : Optional[Any] = torch.arange(_A ).expand((1, -1) ) + 2
return orig_state_dict
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->Optional[Any]:
_lowerCamelCase : List[str] = torch.load(_A , map_location='''cpu''' )['model_state_dict']
_lowerCamelCase : List[Any] = YosoConfig.from_json_file(_A )
_lowerCamelCase : str = YosoForMaskedLM(_A )
_lowerCamelCase : Optional[int] = convert_checkpoint_helper(config.max_position_embeddings , _A )
print(model.load_state_dict(_A ) )
model.eval()
model.save_pretrained(_A )
    print(F'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Tuple =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
SCREAMING_SNAKE_CASE__ : List[str] =parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 434 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase : Any = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
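    # With this lazy setup, importing the package is cheap: the torch-backed classes in
    # _import_structure are only actually imported on first attribute access.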
| 444 | 0 |
import cva
import numpy as np
class _lowerCamelCase :
"""simple docstring"""
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Any:
'''simple docstring'''
if k in (0.0_4, 0.0_6):
A_ : Tuple = k
A_ : Tuple = window_size
else:
raise ValueError('''invalid k value''' )
def __str__( self )->str:
'''simple docstring'''
return str(self.k )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->tuple[cva.Mat, list[list[int]]]:
'''simple docstring'''
A_ : List[Any] = cva.imread(_SCREAMING_SNAKE_CASE , 0 )
A_ : Optional[int] = img.shape
A_ : list[list[int]] = []
A_ : Optional[int] = img.copy()
A_ : str = cva.cvtColor(_SCREAMING_SNAKE_CASE , cva.COLOR_GRAY2RGB )
A_ : List[str] = np.gradient(_SCREAMING_SNAKE_CASE )
A_ : Any = dx**2
A_ : List[str] = dy**2
A_ : Any = dx * dy
A_ : str = 0.0_4
A_ : int = self.window_size // 2
for y in range(_SCREAMING_SNAKE_CASE , h - offset ):
for x in range(_SCREAMING_SNAKE_CASE , w - offset ):
A_ : Optional[Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
A_ : Union[str, Any] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
A_ : List[Any] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
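                # Harris corner response for this window: R = det(M) - k * trace(M)**2,
                # where M = [[wxx, wxy], [wxy, wyy]] is the summed structure tensor.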
A_ : List[Any] = (wxx * wyy) - (wxy**2)
A_ : str = wxx + wyy
A_ : List[Any] = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
UpperCamelCase = HarrisCorner(0.04, 3)
UpperCamelCase , UpperCamelCase = edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img)
| 704 |
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCamelCase ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
snake_case = FunnelTokenizer
snake_case = FunnelTokenizerFast
snake_case = True
snake_case = True
def _snake_case ( self )->Tuple:
'''simple docstring'''
super().setUp()
A_ : Dict = [
'''<unk>''',
'''<cls>''',
'''<sep>''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
A_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def _snake_case ( self , **_SCREAMING_SNAKE_CASE )->Union[str, Any]:
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self , **_SCREAMING_SNAKE_CASE )->str:
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->List[str]:
'''simple docstring'''
A_ : Optional[int] = '''UNwant\u00E9d,running'''
A_ : List[Any] = '''unwanted, running'''
return input_text, output_text
def _snake_case ( self )->int:
'''simple docstring'''
A_ : List[str] = self.tokenizer_class(self.vocab_file )
A_ : List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [7, 4, 5, 10, 8, 9] )
def _snake_case ( self )->str:
'''simple docstring'''
A_ : List[Any] = self.get_tokenizers(do_lower_case=_SCREAMING_SNAKE_CASE )
for tokenizer in tokenizers:
A_ : Optional[Any] = tokenizer('''UNwant\u00E9d,running''' )
A_ : Tuple = len(inputs['''input_ids'''] ) - 1
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len )
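        # Funnel gives the [CLS] token its own token type id (2) rather than BERT's 0,
        # which is why the expected pattern is [2] + [0] * sentence_len.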
A_ : str = tokenizer('''UNwant\u00E9d,running''' , '''UNwant\u00E9d,running''' )
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len + [1] * sentence_len )
| 152 | 0 |
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[float] ):
if len(_UpperCAmelCase ) < 2:
raise ValueError('Monogons and Digons are not polygons in the Euclidean space' )
if any(i <= 0 for i in nums ):
raise ValueError('All values must be greater than 0' )
lowerCAmelCase = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
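# Worked examples (hedged; the obfuscated function above is a polygon-inequality check):
#   [6, 10, 5] -> sorted [5, 6, 10]; 10 < 5 + 6, so it returns True (a valid polygon)
#   [1, 2, 10] -> sorted [1, 2, 10]; 10 >= 1 + 2, so it returns False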
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : int = (UnCLIPScheduler,)
def lowerCAmelCase ( self : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : List[Any]):
__lowerCamelCase : Any = {
'num_train_timesteps': 1_0_0_0,
'variance_type': 'fixed_small_log',
'clip_sample': True,
'clip_sample_range': 1.0,
'prediction_type': 'epsilon',
}
config.update(**SCREAMING_SNAKE_CASE__)
return config
def lowerCAmelCase ( self : Optional[Any]):
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Optional[Any]):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Union[str, Any]):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Tuple):
for clip_sample_range in [1, 5, 1_0, 2_0]:
self.check_over_configs(clip_sample_range=SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : List[Any]):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Dict):
for time_step in [0, 5_0_0, 9_9_9]:
for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ ,prev_timestep=SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : Optional[int] = self.scheduler_classes[0]
__lowerCamelCase : Any = self.get_scheduler_config(variance_type='fixed_small_log')
__lowerCamelCase : Dict = scheduler_class(**SCREAMING_SNAKE_CASE__)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.00_00E-10)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7) - 0.0549625)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9) - 0.9994987)) < 1E-5
def lowerCAmelCase ( self : Any):
__lowerCamelCase : Dict = self.scheduler_classes[0]
__lowerCamelCase : List[str] = self.get_scheduler_config(variance_type='learned_range')
__lowerCamelCase : int = scheduler_class(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = 0.5
assert scheduler._get_variance(1 ,predicted_variance=SCREAMING_SNAKE_CASE__) - -10.1712790 < 1E-5
assert scheduler._get_variance(4_8_7 ,predicted_variance=SCREAMING_SNAKE_CASE__) - -5.7998052 < 1E-5
assert scheduler._get_variance(9_9_9 ,predicted_variance=SCREAMING_SNAKE_CASE__) - -0.0010011 < 1E-5
def lowerCAmelCase ( self : List[str]):
__lowerCamelCase : str = self.scheduler_classes[0]
__lowerCamelCase : str = self.get_scheduler_config()
__lowerCamelCase : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = scheduler.timesteps
__lowerCamelCase : Union[str, Any] = self.dummy_model()
__lowerCamelCase : Optional[Any] = self.dummy_sample_deter
__lowerCamelCase : List[str] = torch.manual_seed(0)
for i, t in enumerate(SCREAMING_SNAKE_CASE__):
# 1. predict noise residual
__lowerCamelCase : int = model(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
# 2. predict previous mean of sample x_t-1
__lowerCamelCase : Optional[int] = scheduler.step(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__).prev_sample
__lowerCamelCase : Optional[Any] = pred_prev_sample
__lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__))
__lowerCamelCase : Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__))
assert abs(result_sum.item() - 252.2682495) < 1E-2
assert abs(result_mean.item() - 0.3284743) < 1E-3
def lowerCAmelCase ( self : str):
__lowerCamelCase : str = self.scheduler_classes[0]
__lowerCamelCase : List[Any] = self.get_scheduler_config()
__lowerCamelCase : int = scheduler_class(**SCREAMING_SNAKE_CASE__)
scheduler.set_timesteps(2_5)
__lowerCamelCase : int = scheduler.timesteps
__lowerCamelCase : Tuple = self.dummy_model()
__lowerCamelCase : Any = self.dummy_sample_deter
__lowerCamelCase : Any = torch.manual_seed(0)
for i, t in enumerate(SCREAMING_SNAKE_CASE__):
# 1. predict noise residual
__lowerCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
if i + 1 == timesteps.shape[0]:
__lowerCamelCase : Optional[Any] = None
else:
__lowerCamelCase : Union[str, Any] = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__lowerCamelCase : int = scheduler.step(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,prev_timestep=SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__).prev_sample
__lowerCamelCase : Union[str, Any] = pred_prev_sample
__lowerCamelCase : Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__))
__lowerCamelCase : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__))
assert abs(result_sum.item() - 258.2044983) < 1E-2
assert abs(result_mean.item() - 0.3362038) < 1E-3
def lowerCAmelCase ( self : List[Any]):
pass
def lowerCAmelCase ( self : Union[str, Any]):
pass
| 652 | 0 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
_lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument("""--user""", type=str, default="""ubuntu""")
parser.add_argument("""--host""", type=str, default="""localhost""")
parser.add_argument("""--key_path""", type=str, default=None)
parser.add_argument("""--instance""", type=str, default="""V100:1""")
parser.add_argument("""--provider""", type=str, default="""cheapest""")
parser.add_argument("""--use_spot""", type=bool, default=False)
parser.add_argument("""--example""", type=str, default="""pytorch/text-generation/run_generation.py""")
_lowerCamelCase , _lowerCamelCase : Tuple = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("""Cannot specify both BYO and on-demand cluster args""")
_lowerCamelCase : Optional[int] = rh.cluster(
name="""rh-cluster""", ips=[args.host], ssh_creds={"""ssh_user""": args.user, """ssh_private_key""": args.key_path}
)
else:
_lowerCamelCase : str = rh.cluster(
name="""rh-cluster""", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
_lowerCamelCase : Any = args.example.rsplit("""/""", 1)[0]
# Set up remote environment
cluster.install_packages(["""pip:./"""]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(["""pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"""])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 177 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : str = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_lowerCamelCase : int = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Optional[Any]:
"""simple docstring"""
A__ = state_dict.pop(lowercase_ )
A__ = val
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
"""simple docstring"""
A__ = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
A__ = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
A__ = value
else:
A__ = value
return new_state_dict
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
A__ = ''''''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
A__ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
A__ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
A__ = in_proj_weight[:256, :]
A__ = in_proj_bias[:256]
A__ = in_proj_weight[256:512, :]
A__ = in_proj_bias[256:512]
A__ = in_proj_weight[-256:, :]
A__ = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
A__ = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
A__ = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
A__ = in_proj_weight[:256, :]
A__ = in_proj_bias[:256]
A__ = in_proj_weight[256:512, :]
A__ = in_proj_bias[256:512]
A__ = in_proj_weight[-256:, :]
A__ = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
A__ = state_dict.pop(
f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
A__ = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
A__ = in_proj_weight_cross_attn[:256, :]
A__ = in_proj_bias_cross_attn[:256]
A__ = in_proj_weight_cross_attn[256:512, :]
A__ = in_proj_bias_cross_attn[256:512]
A__ = in_proj_weight_cross_attn[-256:, :]
A__ = in_proj_bias_cross_attn[-256:]
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Dict:
"""simple docstring"""
A__ , A__ = image.size
A__ = max(lowercase_ , lowercase_ )
A__ = 800 if '''detection''' in checkpoint_url else 1_000
A__ = target_max_size / current_max_size
A__ = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
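# Worked example (hedged): for a 1200x900 "detection" input, target_max_size is 800,
# so scale = 800 / 1200 and the image is resized to (800, 600).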
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
"""simple docstring"""
A__ = F.to_tensor(lowercase_ )
A__ = F.normalize(lowercase_ , mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] )
return image
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
"""simple docstring"""
logger.info('''Converting model...''' )
# load original state dict
A__ = torch.hub.load_state_dict_from_url(lowercase_ , map_location='''cpu''' )
# rename keys
for src, dest in rename_keys:
rename_key(lowercase_ , lowercase_ , lowercase_ )
A__ = rename_backbone_keys(lowercase_ )
# query, key and value matrices need special treatment
read_in_q_k_v(lowercase_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
A__ = '''model.'''
for key in state_dict.copy().keys():
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
A__ = state_dict.pop(lowercase_ )
A__ = val
# create HuggingFace model and load state dict
A__ = TableTransformerConfig(
backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
A__ = 15
A__ = 2
A__ = {0: '''table''', 1: '''table rotated'''}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
else:
A__ = 125
A__ = 6
A__ = {
0: '''table''',
1: '''table column''',
2: '''table row''',
3: '''table column header''',
4: '''table projected row header''',
5: '''table spanning cell''',
}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
A__ = DetrImageProcessor(
format='''coco_detection''' , max_size=800 if '''detection''' in checkpoint_url else 1_000 )
A__ = TableTransformerForObjectDetection(lowercase_ )
model.load_state_dict(lowercase_ )
model.eval()
# verify our conversion
A__ = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
A__ = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=lowercase_ )
A__ = Image.open(lowercase_ ).convert('''RGB''' )
A__ = normalize(resize(lowercase_ , lowercase_ ) ).unsqueeze(0 )
A__ = model(lowercase_ )
if "detection" in checkpoint_url:
A__ = (1, 15, 3)
A__ = torch.tensor(
[[-6.78_97, -16.99_85, 6.79_37], [-8.01_86, -22.21_92, 6.96_77], [-7.31_17, -21.07_08, 7.40_55]] )
A__ = torch.tensor([[0.48_67, 0.17_67, 0.67_32], [0.67_18, 0.44_79, 0.38_30], [0.47_16, 0.17_60, 0.63_64]] )
else:
A__ = (1, 125, 7)
A__ = torch.tensor(
[[-18.14_30, -8.32_14, 4.82_74], [-18.46_85, -7.13_61, -4.26_67], [-26.36_93, -9.34_29, -4.99_62]] )
A__ = torch.tensor([[0.49_83, 0.55_95, 0.94_40], [0.49_16, 0.63_15, 0.59_54], [0.61_08, 0.86_37, 0.11_35]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , lowercase_ , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , lowercase_ , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
model.save_pretrained(lowercase_ )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
# Push model to HF hub
logger.info('''Pushing model to the hub...''' )
A__ = (
'''microsoft/table-transformer-detection'''
if '''detection''' in checkpoint_url
else '''microsoft/table-transformer-structure-recognition'''
)
model.push_to_hub(lowercase_ )
image_processor.push_to_hub(lowercase_ )
if __name__ == "__main__":
_lowerCamelCase : str = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_lowerCamelCase : str = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 177 | 1 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
TEST_UNET_CONFIG = {
    "sample_size": 32,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1000,
    "block_out_channels": [32, 64],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

IMAGENET_64_UNET_CONFIG = {
    "sample_size": 64,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1000,
    "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

LSUN_256_UNET_CONFIG = {
    "sample_size": 256,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
def strabool(v) -> bool:
    """Parse a boolean-ish command-line value."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    """Map one original resnet block onto diffusers parameter names."""
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
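# Editor's sketch: a tiny self-check of the key mapping above on a fake checkpoint.
# Only the renaming is exercised; the shapes are arbitrary.
def _demo_convert_resnet():
    fake = {
        f"block.{name}": torch.randn(4)
        for name in (
            "in_layers.0.weight", "in_layers.0.bias", "in_layers.2.weight", "in_layers.2.bias",
            "emb_layers.1.weight", "emb_layers.1.bias", "out_layers.0.weight", "out_layers.0.bias",
            "out_layers.3.weight", "out_layers.3.bias",
        )
    }
    mapped = convert_resnet(fake, {}, "block", "unet.block")
    assert "unet.block.norm1.weight" in mapped and "unet.block.conv2.bias" in mapped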
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim=None):
    """Map one original attention block onto diffusers parameter names."""
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    # the original 1x1 convolutions become plain linear projections
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    """Convert an original consistency-model UNet checkpoint into diffusers format."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
lowerCamelCase__ : Dict = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
lowerCamelCase__ : Tuple = parser.parse_args()
lowerCamelCase__ : Dict = strabool(args.class_cond)
lowerCamelCase__ : List[Any] = os.path.basename(args.unet_path)
print(f'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
lowerCamelCase__ : Optional[Any] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCamelCase__ : Optional[Any] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
lowerCamelCase__ : Dict = TEST_UNET_CONFIG
else:
raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
lowerCamelCase__ : Union[str, Any] = None
lowerCamelCase__ : Optional[Any] = con_pt_to_diffuser(args.unet_path, unet_config)
lowerCamelCase__ : List[Any] = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
lowerCamelCase__ : Union[str, Any] = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
lowerCamelCase__ : List[str] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowerCamelCase__ : List[Any] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
lowerCamelCase__ : str = CMStochasticIterativeScheduler(**scheduler_config)
lowerCamelCase__ : Optional[int] = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
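# Editor's sketch: reloading the saved pipeline for single-step consistency sampling.
# The folder name mirrors a hypothetical --dump_path value.
def _demo_consistency_sampling(dump_path="consistency-model"):
    pipe = ConsistencyModelPipeline.from_pretrained(dump_path)
    image = pipe(num_inference_steps=1).images[0]  # one-step generation
    image.save("sample.png")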
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    """Runs the same prompt through Stable Diffusion v1.1-v1.4 side by side."""

    def __init__(self, vae, text_encoder, tokenizer, unet: UNet2DConditionModel, scheduler, safety_checker, feature_extractor, requires_safety_checker=True):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler,
            safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker,
        )
        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
    @torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Check that the height and width are divisible by 8
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Collect the first image from each checkpoint into a single output
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
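# Editor's sketch: the class above is a community-style pipeline, so it would normally
# be loaded through DiffusionPipeline with a custom_pipeline hint. The hub identifier
# for the community script is assumed, not verified.
def _demo_comparison(prompt="an astronaut riding a horse"):
    pipe = DiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
    )
    pipe.enable_attention_slicing()
    output = pipe(prompt, num_inference_steps=25)
    return output.images  # one image per checkpoint v1.1-v1.4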
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )
    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
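# Editor's sketch: preprocessing one random image with the processor defined above.
# PIL is assumed available alongside the transformers imports.
def _demo_preprocess():
    from PIL import Image

    processor = LevitImageProcessor(size={"shortest_edge": 224})
    image = Image.fromarray(np.uint8(np.random.rand(256, 320, 3) * 255))
    batch = processor(images=image, return_tensors="pt")
    print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])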
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(self, backbone_config=None, num_queries=900, max_position_embeddings=2048, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", num_feature_levels=5, encoder_n_points=4, decoder_n_points=4, two_stage=True, two_stage_num_proposals=300, with_box_refine=True, assign_first_stage=True, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, **kwargs):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self) -> dict:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
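# Editor's sketch: instantiating the config and checking a mapped attribute.
def _demo_deta_config():
    config = DetaConfig(num_queries=300)
    assert config.model_type == "deta"
    assert config.num_attention_heads == 8  # mapped to encoder_attention_heads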
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """BPE tokenizer for BlenderbotSmall: lowercases the input and applies GPT-2-style merges."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__", unk_token="__unk__", pad_token="__null__", **kwargs):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
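# Editor's sketch: get_pairs feeds the BPE merge loop above with the candidate
# symbol pairs of a word.
def _demo_get_pairs():
    assert get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}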
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs) -> dict:
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)
    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5
        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5
    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)
        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
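# Editor's sketch: the scheduler under test can also be inspected directly,
# outside the unittest harness.
def _demo_unclip_scheduler():
    scheduler = UnCLIPScheduler(variance_type="fixed_small_log")
    scheduler.set_timesteps(10)
    print(scheduler.timesteps)  # tensor of 10 descending timesteps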
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", quant_mode=False, force_dequant="none", **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
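# Editor's sketch: how the ONNX axis mapping above changes with the task. The task
# string follows the transformers.onnx conventions.
def _demo_ibert_onnx_inputs():
    config = IBertConfig()
    onnx_config = IBertOnnxConfig(config, task="multiple-choice")
    print(onnx_config.inputs)  # input_ids/attention_mask with a "choice" axis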
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt, image=init_image, strength=0.85, generator=generator, negative_prompt=""
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Construct a map from TF variable names to PyTorch parameters."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints into a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features, conv_layer) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer's input."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
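# Editor's sketch: "SAME"-style padding produced by apply_tf_padding for a strided
# convolution, checked on a toy input. Shapes are arbitrary.
def _demo_apply_tf_padding():
    conv = nn.Conv2d(3, 8, kernel_size=3, stride=2)
    features = torch.randn(1, 3, 7, 7)
    padded = apply_tf_padding(features, conv)
    print(padded.shape)  # torch.Size([1, 3, 9, 9]): padded so the kernel covers every input pixel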
class MobileNetVaConvLayer(nn.Module):
    def __init__(self, config: MobileNetVaConfig, in_channels: int, out_channels: int, kernel_size: int, stride: Optional[int] = 1, groups: Optional[int] = 1, bias: bool = False, use_normalization: Optional[bool] = True, use_activation: Optional[bool or str] = True) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # Depthwise 3x3 convolution (one filter per input channel) ...
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            # ... followed by a pointwise 1x1 convolution that mixes channels.
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
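# Minimal usage sketch (illustrative, not part of this module). Assumes the public
# "google/mobilenet_v1_1.0_224" checkpoint and a PIL image named `image`:
#
#   from transformers import AutoImageProcessor, MobileNetV1ForImageClassification
#
#   processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#   model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#   inputs = processor(images=image, return_tensors="pt")
#   predicted_class = model(**inputs).logits.argmax(-1).item()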
| 351 | import unittest

from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device

from .test_unet_blocks_common import UNetBlockTesterMixin


class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)
class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)


class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)
class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)
class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)
class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)


class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)
class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)


class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)


class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)
class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)
class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)


class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)
class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)
class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
| 486 | 0 |
import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    """
    Count the minimum number of perfect squares that sum up to `number`,
    via dynamic programming over all values from 1 to `number`.
    """
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
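    # Illustrative spot checks: 25 = 5**2 needs one square, 37 = 36 + 1 needs two.
    print(minimum_squares_to_represent_a_number(25))  # 1
    print(minimum_squares_to_represent_a_number(37))  # 2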
| 470 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
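    # With this lazy setup, `from transformers import LiltModel` imports
    # `modeling_lilt` (and its torch dependency) only on first attribute access,
    # while `LiltConfig` stays importable even without torch installed.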
| 470 | 1 |
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """
    Treats the curve as a collection of linear segments and sums the area of each
    trapezoid to approximate the area between the curve and the x axis.
    """
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x: float) -> float:
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
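    # The estimates above converge to the exact area: x^3 + x^2 changes sign at
    # x = -1, and the integral of |x^3 + x^2| over [-5, 5] works out to 938/3 ≈ 312.67.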
| 321 | from __future__ import annotations

import unittest

import numpy as np

from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask


if is_tf_available():
    import tensorflow as tf

    from transformers import TFBlipTextModel
    from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None

        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            # Ensure at least one token is attended to in each row of the mask.
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 321 | 1 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
] , )
        outputs = object_detector(
[
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
] , threshold=0.64 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
{"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
{"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
{"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
]
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
] , )
        outputs = object_detector(
[
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
] , )
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
[
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
{"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
{"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
],
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
    def test_large_model_tf(self):
pass
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
{"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
] , )
@require_torch
@slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
] , )
| 721 |
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict) == len(split_dict_yaml_list)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 385 | 0 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
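# Illustration (hand-checked): with pad_token_id=0,
#   trim_batch(torch.tensor([[5, 6, 0], [7, 0, 0]]), 0)
# keeps only the first two columns -> tensor([[5, 6], [7, 0]]),
# since the last column contains nothing but padding.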
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
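# Worked example: f1_score("the cat sat", "cat sat down") normalizes to
# ["cat", "sat"] vs ["cat", "sat", "down"] (the article "the" is dropped),
# so precision = 2/2, recall = 2/3 and F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.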
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 636 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Given an 'isbn/0140328726'-style olid, return book data from Open Library as a dict."""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()
def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a more human-readable summary dict."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
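# Example flow (network required; ISBN 0140328726 is an edition of Roald Dahl's
# "Matilda", so the summary's "Title" entry should reflect that):
#   summarize_book(get_openlibrary_data("isbn/0140328726"))["Title"]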
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
continue
print(F"\nSearching Open Library for ISBN: {isbn}...\n")
try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"Sorry, there are no results for ISBN: {isbn}.")
| 65 | 0 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model
def wait_for_everyone():
    PartialState().wait_for_everyone()


def save(obj, f):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    # Temporarily set environment variables (keys are upper-cased), removing them on exit.
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
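# Usage sketch: keys are upper-cased on entry and deleted again on exit.
#
#   with patch_environment(master_port="29501"):
#       assert os.environ["MASTER_PORT"] == "29501"
#   # "MASTER_PORT" is removed from os.environ again here.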
def get_pretty_name(obj):
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
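# Examples: get_pretty_name(int) -> "int"; get_pretty_name(3) -> "int",
# since an instance without __name__/__qualname__ falls back to its class.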
def merge_dicts(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination
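# Example: nested keys are merged recursively and scalar leaves in `source` win:
#   merge_dicts({"a": {"b": 1}}, {"a": {"c": 2}, "d": 3})
#   -> {"a": {"c": 2, "b": 1}, "d": 3}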
def is_port_in_use(port: int = None) -> bool:
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
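# Quick check (the default 29500 is the usual torch.distributed rendezvous port):
#   if is_port_in_use(29500):
#       print("something is already listening on 29500")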
| 555 |
import re
import time
from typing import Optional

import IPython.display as disp

from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length


def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
def html_progress_bar(value, total, prefix, label, width=300):
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """
def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"    <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
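# Illustration: text_to_html_table([["Step", "Loss"], [10, 0.5]]) renders a
# two-column header row plus one body row; floats are shown with 6 decimals
# (0.5 -> "0.500000") while other values go through str().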
class NotebookProgressBar:
    warmup = 5
    update_every = 0.2

    def __init__(
        self,
        total: int,
        prefix: Optional[str] = None,
        leave: bool = True,
        parent: Optional["NotebookTrainingTracker"] = None,
        width: int = 300,
    ):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
    def update(self, value: int, force_update: bool = False, comment: str = None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)
    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()
    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None
    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)
    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break
            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True
    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step, comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}", force_update=True, )
        self.training_tracker = None
| 555 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 104 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    """simple docstring"""

    mode = "sequence-classification"
def __init__( self , SCREAMING_SNAKE_CASE__ ) -> Any:
if type(SCREAMING_SNAKE_CASE__ ) == dict:
A__ = Namespace(**SCREAMING_SNAKE_CASE__ )
A__ = glue_output_modes[hparams.task]
A__ = glue_tasks_num_labels[hparams.task]
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.mode )
def snake_case__ ( self , **SCREAMING_SNAKE_CASE__ ) -> Dict:
return self.model(**SCREAMING_SNAKE_CASE__ )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
A__ = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
A__ = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
A__ = self(**SCREAMING_SNAKE_CASE__ )
A__ = outputs[0]
A__ = self.trainer.lr_schedulers[0]["scheduler"]
A__ = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def snake_case__ ( self ) -> List[str]:
A__ = self.hparams
A__ = processors[args.task]()
A__ = processor.get_labels()
for mode in ["train", "dev"]:
A__ = self._feature_file(SCREAMING_SNAKE_CASE__ )
if os.path.exists(SCREAMING_SNAKE_CASE__ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , SCREAMING_SNAKE_CASE__ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
A__ = (
processor.get_dev_examples(args.data_dir )
if mode == "dev"
else processor.get_train_examples(args.data_dir )
)
A__ = convert_examples_to_features(
SCREAMING_SNAKE_CASE__ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("Saving features into cached file %s" , SCREAMING_SNAKE_CASE__ )
torch.save(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False ) -> DataLoader:
A__ = "dev" if mode == "test" else mode
A__ = self._feature_file(SCREAMING_SNAKE_CASE__ )
logger.info("Loading features from cached file %s" , SCREAMING_SNAKE_CASE__ )
A__ = torch.load(SCREAMING_SNAKE_CASE__ )
A__ = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
A__ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
A__ = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
A__ = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
A__ = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , batch_size=SCREAMING_SNAKE_CASE__ , shuffle=SCREAMING_SNAKE_CASE__ , )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Dict:
A__ = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
A__ = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
A__ = self(**SCREAMING_SNAKE_CASE__ )
A__ , A__ = outputs[:2]
A__ = logits.detach().cpu().numpy()
A__ = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ ) -> tuple:
A__ = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item()
A__ = np.concatenate([x["pred"] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
A__ = np.argmax(SCREAMING_SNAKE_CASE__ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
A__ = np.squeeze(SCREAMING_SNAKE_CASE__ )
A__ = np.concatenate([x["target"] for x in outputs] , axis=0 )
A__ = [[] for _ in range(out_label_ids.shape[0] )]
A__ = [[] for _ in range(out_label_ids.shape[0] )]
A__ = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}
A__ = dict(results.items() )
A__ = results
return ret, preds_list, out_label_list
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ ) -> dict:
A__ , A__ , A__ = self._eval_end(SCREAMING_SNAKE_CASE__ )
A__ = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ ) -> dict:
A__ , A__ , A__ = self._eval_end(SCREAMING_SNAKE_CASE__ )
A__ = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
parser.add_argument(
"--max_seq_length" , default=128 , type=SCREAMING_SNAKE_CASE__ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--task" , default="" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="The GLUE task to run" , )
parser.add_argument(
"--gpus" , default=0 , type=SCREAMING_SNAKE_CASE__ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results", f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}", )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
| 104 | 1 |
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            # legacy_format=False saves the unified tokenizer.json; everything else is removed below.
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name)
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 703 |
'''simple docstring'''
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    # Leftmost index at which ``item`` can be inserted while keeping the list sorted.
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    # Rightmost index at which ``item`` can be inserted while keeping the list sorted.
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)
def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)
def binary_search(sorted_collection: list[int], item: int) -> int | None:
    # Iterative binary search: returns the index of ``item`` or None when absent.
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    # Same search implemented with the standard-library ``bisect`` module.
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
| 13 | 0 |
'''simple docstring'''
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class lowercase_ :
'''simple docstring'''
    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        """simple docstring"""
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()
    def step(self, *args, **kwargs):
"""simple docstring"""
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , 'total_steps' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)
    def get_last_lr(self):
"""simple docstring"""
return self.scheduler.get_last_lr()
    def state_dict(self):
"""simple docstring"""
return self.scheduler.state_dict()
    def load_state_dict(self, state_dict):
        """simple docstring"""
        self.scheduler.load_state_dict(state_dict)
    def get_lr(self):
"""simple docstring"""
return self.scheduler.get_lr()
    def print_lr(self, *args, **kwargs):
        """simple docstring"""
        return self.scheduler.print_lr(*args, **kwargs)
| 447 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    # Largest product a * b * c for a Pythagorean triplet with a + b + c == n.
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2 + b**2 = c**2 and a + b + c = n, eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(F'''{solution() = }''')
| 447 | 1 |
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    def __init__(self) -> None:
        # Three FIFO queues, one per priority level (0 is the highest priority).
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        # Pop from the highest-priority non-empty queue.
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    def __init__(self) -> None:
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        # The smallest element is treated as the highest priority.
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data

    def __str__(self) -> str:
        return str(self.queue)
def fixed_priority_queue():
    fpq = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 100 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 128 )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue():
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 706 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""vinvino02/glpn-kitti""": """https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json""",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    model_type = "glpn"
def __init__( self : int , UpperCAmelCase__ : Tuple=3 , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : str=[2, 2, 2, 2] , UpperCAmelCase__ : Tuple=[8, 4, 2, 1] , UpperCAmelCase__ : List[Any]=[3_2, 6_4, 1_6_0, 2_5_6] , UpperCAmelCase__ : List[Any]=[7, 3, 3, 3] , UpperCAmelCase__ : List[Any]=[4, 2, 2, 2] , UpperCAmelCase__ : Optional[Any]=[1, 2, 5, 8] , UpperCAmelCase__ : Union[str, Any]=[4, 4, 4, 4] , UpperCAmelCase__ : List[Any]="gelu" , UpperCAmelCase__ : Optional[Any]=0.0 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : Union[str, Any]=0.02 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : int=1E-6 , UpperCAmelCase__ : int=6_4 , UpperCAmelCase__ : int=1_0 , UpperCAmelCase__ : str=-1 , **UpperCAmelCase__ : Tuple , ) -> List[str]:
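        # Mirrors the SegFormer hierarchical-encoder hyper-parameters, plus the depth-decoder
        # settings (decoder_hidden_size, max_depth, head_in_index) specific to GLPN.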
super().__init__(**UpperCAmelCase__ )
lowerCAmelCase = num_channels
lowerCAmelCase = num_encoder_blocks
lowerCAmelCase = depths
lowerCAmelCase = sr_ratios
lowerCAmelCase = hidden_sizes
lowerCAmelCase = patch_sizes
lowerCAmelCase = strides
lowerCAmelCase = mlp_ratios
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = initializer_range
lowerCAmelCase = drop_path_rate
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = decoder_hidden_size
lowerCAmelCase = max_depth
lowerCAmelCase = head_in_index
| 513 | 0 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
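# This package exposes the ONNX export API lazily: submodules are only imported on first attribute access.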
_import_structure = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
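    # Static type checkers see eager imports; at runtime the _LazyModule below defers them.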
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 500 |
def binomial_coefficient(n, r):
    '''simple docstring'''
    # Compute C(n, r) with Pascal's rule, keeping only one row of the triangle.
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
| 500 | 1 |
'''simple docstring'''
# Imports
import numpy as np
class _SCREAMING_SNAKE_CASE :
    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        """simple docstring"""
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        """simple docstring"""
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """simple docstring"""
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        # Map index names to their bound methods so an index can be computed by name.
        funcs = {
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('''Index not in the list!''' )
return False
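    # Individual spectral indices. Each method combines the stored band matrices
    # (red, green, blue, redEdge, nir) according to its standard formula.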
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return self.nir * (self.red / (self.green**2))
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
return (self.nir - self.red) / (self.nir + self.red)
def lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
return (self.nir - self.blue) / (self.nir + self.blue)
def lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return (self.redEdge - self.red) / (self.redEdge + self.red)
def lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green)
def lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def lowerCAmelCase ( self : Optional[Any] , __UpperCamelCase : Any=0.08 , __UpperCamelCase : Union[str, Any]=1.22 , __UpperCamelCase : Union[str, Any]=0.03 ) -> Optional[Any]:
"""simple docstring"""
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
return (self.nir / self.green) - 1
def lowerCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
return (self.nir / self.redEdge) - 1
def lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return (self.red - self.blue) / self.red
def lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
return self.nir - self.green
def lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def lowerCAmelCase ( self : List[str] , __UpperCamelCase : List[str]=0.16 ) -> int:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green + y)
def lowerCAmelCase ( self : List[Any] , __UpperCamelCase : int=0.5 ) -> Optional[Any]:
"""simple docstring"""
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def lowerCAmelCase ( self : Tuple , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : str=None ) -> Any:
"""simple docstring"""
return (self.nir - b) / (a * self.red)
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
return (self.red + self.green + self.blue) / 30.5
def lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
return self.nir / self.red
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
return (self.rvi() - 1) / (self.rvi() + 1)
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
return self.green / (self.nir + self.red + self.green)
def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.nir / (self.nir + self.red + self.green)
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return self.red / (self.nir + self.red + self.green)
def lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
return (self.green - self.red) / (self.green + self.red)
def lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
return (self.red - self.green) / (self.red + self.green)
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value
def lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
return self.nir / self.red
def lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return (self.ndvi() + 0.5) ** (1 / 2)
def lowerCAmelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 574 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        'generator',
        'return_dict',
        'decoder_num_inference_steps',
        'super_res_num_inference_steps',
    ]
    test_xformers_attention = False
@property
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
return 32
@property
def lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return 32
@property
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return self.time_input_dim
@property
def lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
return 100
@property
def lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
snake_case__ : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__UpperCamelCase )
@property
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
snake_case__ : Any = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(__UpperCamelCase )
@property
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
snake_case__ : Optional[Any] = {
'''clip_embeddings_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''cross_attention_dim''': self.cross_attention_dim,
}
snake_case__ : Tuple = UnCLIPTextProjModel(**__UpperCamelCase )
return model
@property
def lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
snake_case__ : Optional[Any] = {
'''sample_size''': 32,
# RGB in channels
'''in_channels''': 3,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 6,
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': '''identity''',
}
        snake_case__ : Tuple = UNet2DConditionModel(**__UpperCamelCase )
return model
@property
def lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def lowerCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
        snake_case__ : Union[str, Any] = UNet2DModel(**self.dummy_super_res_kwargs )
return model
@property
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(1 )
        snake_case__ : int = UNet2DModel(**self.dummy_super_res_kwargs )
return model
def lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
snake_case__ : List[Any] = self.dummy_decoder
snake_case__ : int = self.dummy_text_proj
snake_case__ : Union[str, Any] = self.dummy_text_encoder
snake_case__ : str = self.dummy_tokenizer
snake_case__ : List[str] = self.dummy_super_res_first
snake_case__ : str = self.dummy_super_res_last
snake_case__ : Optional[int] = UnCLIPScheduler(
variance_type='''learned_range''' , prediction_type='''epsilon''' , num_train_timesteps=1000 , )
snake_case__ : int = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''epsilon''' , num_train_timesteps=1000 , )
snake_case__ : int = CLIPImageProcessor(crop_size=32 , size=32 )
snake_case__ : str = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def lowerCAmelCase ( self : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple=0 , __UpperCamelCase : Dict=True ) -> str:
"""simple docstring"""
snake_case__ : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
if str(__UpperCamelCase ).startswith('''mps''' ):
snake_case__ : List[str] = torch.manual_seed(__UpperCamelCase )
else:
snake_case__ : Optional[int] = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
if pil_image:
snake_case__ : Any = input_image * 0.5 + 0.5
snake_case__ : Optional[Any] = input_image.clamp(0 , 1 )
snake_case__ : Tuple = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
snake_case__ : List[str] = DiffusionPipeline.numpy_to_pil(__UpperCamelCase )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
snake_case__ : Tuple = '''cpu'''
snake_case__ : Dict = self.get_dummy_components()
snake_case__ : List[str] = self.pipeline_class(**__UpperCamelCase )
snake_case__ : str = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Dict = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
snake_case__ : List[str] = pipe(**__UpperCamelCase )
snake_case__ : str = output.images
snake_case__ : Dict = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
snake_case__ : Any = pipe(
**__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
snake_case__ : Dict = image[0, -3:, -3:, -1]
snake_case__ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case__ : str = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
snake_case__ : int = '''cpu'''
snake_case__ : List[str] = self.get_dummy_components()
snake_case__ : Dict = self.pipeline_class(**__UpperCamelCase )
snake_case__ : Any = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Optional[Any] = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
snake_case__ : Optional[int] = pipe(**__UpperCamelCase )
snake_case__ : str = output.images
snake_case__ : str = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
snake_case__ : List[str] = pipe(
**__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
snake_case__ : Tuple = image[0, -3:, -3:, -1]
snake_case__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case__ : Optional[Any] = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
snake_case__ : Union[str, Any] = '''cpu'''
snake_case__ : Union[str, Any] = self.get_dummy_components()
snake_case__ : Dict = self.pipeline_class(**__UpperCamelCase )
snake_case__ : List[str] = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : List[Any] = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
snake_case__ : int = [
pipeline_inputs['''image'''],
pipeline_inputs['''image'''],
]
snake_case__ : List[Any] = pipe(**__UpperCamelCase )
snake_case__ : Tuple = output.images
snake_case__ : Union[str, Any] = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
snake_case__ : List[Any] = [
tuple_pipeline_inputs['''image'''],
tuple_pipeline_inputs['''image'''],
]
snake_case__ : int = pipe(
**__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
snake_case__ : str = image[0, -3:, -3:, -1]
snake_case__ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
snake_case__ : int = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Any = torch.device('''cpu''' )
        class DummyScheduler:
            init_noise_sigma = 1
snake_case__ : Optional[Any] = self.get_dummy_components()
snake_case__ : List[str] = self.pipeline_class(**__UpperCamelCase )
snake_case__ : List[str] = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : List[Any] = torch.Generator(device=__UpperCamelCase ).manual_seed(0 )
snake_case__ : str = pipe.decoder.dtype
snake_case__ : Tuple = 1
snake_case__ : List[Any] = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
snake_case__ : Union[str, Any] = pipe.prepare_latents(
__UpperCamelCase , dtype=__UpperCamelCase , device=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , scheduler=DummyScheduler() )
snake_case__ : Optional[Any] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
snake_case__ : Optional[int] = pipe.prepare_latents(
__UpperCamelCase , dtype=__UpperCamelCase , device=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , scheduler=DummyScheduler() )
snake_case__ : Optional[Any] = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
snake_case__ : Any = pipe(
**__UpperCamelCase , decoder_latents=__UpperCamelCase , super_res_latents=__UpperCamelCase ).images
snake_case__ : Optional[int] = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
# Don't pass image, instead pass embedding
snake_case__ : List[str] = pipeline_inputs.pop('''image''' )
snake_case__ : Dict = pipe.image_encoder(__UpperCamelCase ).image_embeds
snake_case__ : Optional[Any] = pipe(
**__UpperCamelCase , decoder_latents=__UpperCamelCase , super_res_latents=__UpperCamelCase , image_embeddings=__UpperCamelCase , ).images
        # make sure passing image embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1e-4
@skip_mps
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
snake_case__ : Optional[Any] = torch_device == '''cpu'''
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
snake_case__ : str = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=__UpperCamelCase , expected_max_diff=__UpperCamelCase )
@skip_mps
def lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Optional[Any] = torch_device == '''cpu'''
snake_case__ : List[Any] = True
snake_case__ : int = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
self._test_inference_batch_single_identical(
test_max_difference=__UpperCamelCase , relax_max_difference=__UpperCamelCase , additional_params_copy_to_batched_inputs=__UpperCamelCase , )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Optional[int] = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
snake_case__ : Dict = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=__UpperCamelCase , additional_params_copy_to_batched_inputs=__UpperCamelCase , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=__UpperCamelCase )
@skip_mps
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
return super().test_save_load_local()
@skip_mps
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase ):
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png''' )
snake_case__ : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/unclip/karlo_v1_alpha_cat_variation_fp16.npy''' )
snake_case__ : Dict = UnCLIPImageVariationPipeline.from_pretrained(
'''kakaobrain/karlo-v1-alpha-image-variations''' , torch_dtype=torch.floataa )
snake_case__ : Dict = pipeline.to(__UpperCamelCase )
pipeline.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Dict = torch.Generator(device='''cpu''' ).manual_seed(0 )
snake_case__ : Optional[int] = pipeline(
__UpperCamelCase , generator=__UpperCamelCase , output_type='''np''' , )
snake_case__ : Optional[int] = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(__UpperCamelCase , __UpperCamelCase , 15 )
| 574 | 1 |
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/config.json',
# See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , lowerCAmelCase=50_265 , lowerCAmelCase=1_024 , lowerCAmelCase=12 , lowerCAmelCase=4_096 , lowerCAmelCase=16 , lowerCAmelCase=12 , lowerCAmelCase=4_096 , lowerCAmelCase=16 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase="gelu" , lowerCAmelCase=1_024 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.02 , lowerCAmelCase=0.0 , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=3 , lowerCAmelCase=1 , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase=True , lowerCAmelCase=2 , lowerCAmelCase=2 , **lowerCAmelCase , ) -> Dict:
'''simple docstring'''
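        # Standard encoder/decoder hyper-parameters; the second encoder_layers assignment below
        # corresponds to num_hidden_layers in the upstream config.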
_lowercase =vocab_size
_lowercase =max_position_embeddings
_lowercase =d_model
_lowercase =encoder_ffn_dim
_lowercase =encoder_layers
_lowercase =encoder_attention_heads
_lowercase =decoder_ffn_dim
_lowercase =decoder_layers
_lowercase =decoder_attention_heads
_lowercase =dropout
_lowercase =attention_dropout
_lowercase =activation_dropout
_lowercase =activation_function
_lowercase =init_std
_lowercase =encoder_layerdrop
_lowercase =decoder_layerdrop
_lowercase =classifier_dropout
_lowercase =use_cache
_lowercase =encoder_layers
_lowercase =scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=lowerCAmelCase , pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , forced_eos_token_id=lowerCAmelCase , **lowerCAmelCase , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , lowerCAmelCase ):
_lowercase =self.bos_token_id
warnings.warn(
F'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
'The config can simply be saved and uploaded again to be fixed.' )
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
def A__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_lowercase =OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowercase ={0: "batch"}
_lowercase ={0: "batch", 1: "past_decoder_sequence + sequence"}
else:
_lowercase ={0: "batch", 1: "decoder_sequence"}
_lowercase ={0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_lowercase =OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowercase =self.num_layers
for i in range(lowerCAmelCase ):
_lowercase ={0: "batch", 2: "past_sequence + sequence"}
_lowercase ={0: "batch", 2: "past_sequence + sequence"}
else:
_lowercase =OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def A__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_lowercase =super().outputs
else:
_lowercase =super(lowerCAmelCase , self ).outputs
if self.use_past:
_lowercase =self.num_layers
for i in range(lowerCAmelCase ):
_lowercase ={0: "batch", 2: "past_sequence + sequence"}
_lowercase ={0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def A__ ( self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
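        # Encoder inputs are generated first; decoder inputs use sequence length 1 when past key values carry the history.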
_lowercase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Generate decoder inputs
_lowercase =seq_length if not self.use_past else 1
_lowercase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
_lowercase ={F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
_lowercase =dict(**lowerCAmelCase , **lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowercase =common_inputs["input_ids"].shape
_lowercase =common_inputs["decoder_input_ids"].shape[1]
_lowercase =self.num_attention_heads
_lowercase =(
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowercase =decoder_seq_length + 3
_lowercase =(
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowercase =torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase )] , dim=1 )
_lowercase =[]
            # If the numbers of encoder and decoder layers are present in the model configuration, both are considered
_lowercase =self.num_layers
_lowercase =min(lowerCAmelCase , lowerCAmelCase )
_lowercase =max(lowerCAmelCase , lowerCAmelCase ) - min_num_layers
_lowercase ="encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(lowerCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
) )
# TODO: test this.
_lowercase =encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(lowerCAmelCase , lowerCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) )
return common_inputs
def A__ ( self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
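        # For causal-LM exports, the attention mask is extended to cover the dummy past key values.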
_lowercase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowercase =common_inputs["input_ids"].shape
# Not using the same length for past_key_values
_lowercase =seqlen + 2
_lowercase =self.num_layers
_lowercase =self.num_attention_heads
_lowercase =(
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowercase =common_inputs["attention_mask"].dtype
_lowercase =torch.cat(
[common_inputs['attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase , dtype=lowerCAmelCase )] , dim=1 )
_lowercase =[
(torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(lowerCAmelCase )
]
return common_inputs
def A__ ( self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
_lowercase =compute_effective_axis_dimension(
lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowercase =tokenizer.num_special_tokens_to_add(lowerCAmelCase )
_lowercase =compute_effective_axis_dimension(
lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
_lowercase =[" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowercase =dict(tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase ) )
return common_inputs
def A__ ( self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_lowercase =self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
elif self.task == "causal-lm":
_lowercase =self._generate_dummy_inputs_for_causal_lm(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
else:
_lowercase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
return common_inputs
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_lowercase =super()._flatten_past_key_values_(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
_lowercase =super(lowerCAmelCase , self )._flatten_past_key_values_(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
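# Illustrative sketch (standalone; `batch`, `num_heads`, `past_len` and `head_dim`
# are made-up demo values, not read from the config class above): each decoder
# layer's dummy cache entry is a tuple of four zero tensors shaped
# (batch, num_heads, past_len, head_dim) - self-attention key/value plus
# cross-attention key/value.
import torch

batch, num_heads, past_len, head_dim = 2, 8, 5, 64
shape = (batch, num_heads, past_len, head_dim)
layer_cache = tuple(torch.zeros(shape) for _ in range(4))
assert all(t.shape == shape for t in layer_cache)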
| 291 |
from __future__ import annotations
class BoyerMooreSearch:
    '''simple docstring'''
    def __init__( self , text :str , pattern :str ) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text ), len(pattern )
    def match_in_pattern( self , char :str ) -> int:
        for i in range(self.patLen - 1 , -1 , -1 ):
            if char == self.pattern[i]:
                return i
        return -1
    def mismatch_in_text( self , current_pos :int ) -> int:
        for i in range(self.patLen - 1 , -1 , -1 ):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1
    def bad_character_heuristic( self ) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1 ):
            mismatch_index = self.mismatch_in_text(i )
            if mismatch_index == -1:
                positions.append(i )
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index] )
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
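# Quick illustrative check (uses the class above): in "ABAABA" the pattern "AB"
# occurs at indices 0 and 3; on a mismatch the window is shifted by
# mismatch_index - match_index before the scan continues.
assert BoyerMooreSearch("ABAABA", "AB").bad_character_heuristic() == [0, 3]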
text = 'ABAABA'
pattern = 'AB'
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
| 557 | 0 |
from functools import lru_cache
@lru_cache
def factorial( num: int ) -> int:
    """
    Compute num! for a non-negative integer.

    >>> factorial(0)
    1
    >>> factorial(5)
    120
    """
    if num < 0:
        raise ValueError('''Number should not be negative.''' )
    return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
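    # Illustrative extra check alongside the doctests: 5! == 120.
    assert factorial(5) == 120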
| 711 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get( k ):
    return getitem, k
def _set( k , v ):
    return setitem, k, v
def _del( k ):
    return delitem, k
def _run_operation( obj , fun , *args ):
    try:
        return fun(obj , *args ), None
    except Exception as e:
        return None, e
UpperCamelCase = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
UpperCamelCase = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
UpperCamelCase = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
UpperCamelCase = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
UpperCamelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
UpperCamelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
'''operations''' , (
pytest.param(_add_items , id='''add items''' ),
pytest.param(_overwrite_items , id='''overwrite items''' ),
pytest.param(_delete_items , id='''delete items''' ),
pytest.param(_access_absent_items , id='''access absent items''' ),
pytest.param(_add_with_resize_up , id='''add with resize up''' ),
pytest.param(_add_with_resize_down , id='''add with resize down''' ),
) , )
def test_hash_map_is_the_same_as_dict( operations ):
    my = HashMap(initial_block_size=4 )
    py = {}
    for _, (fun, *args) in enumerate(operations ):
        my_res, my_exc = _run_operation(my , fun , *args )
        py_res, py_exc = _run_operation(py , fun , *args )
        assert my_res == py_res
        assert str(my_exc ) == str(py_exc )
        assert set(py ) == set(my )
        assert len(py ) == len(my )
        assert set(my.items() ) == set(py.items() )
def test_no_new_methods():
    def is_public(name ) -> bool:
        return not name.startswith('''_''' )
    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
assert dict_public_names > hash_public_names
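# Illustrative sketch (standalone; `run_op` is a demo helper, not part of the
# test module above): the same "return (result, exception)" capture pattern,
# exercised with a plain dict.
def run_op(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e

res, err = run_op({}, getitem, "missing")
assert res is None and isinstance(err, KeyError)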
| 152 | 0 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class __A:
def lowercase__ ( self : int ):
torch.manual_seed(0 )
lowerCamelCase_ = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
lowerCamelCase_ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowerCamelCase_ = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
lowerCamelCase_ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def lowercase__ ( self : Any ):
torch.manual_seed(0 )
lowerCamelCase_ = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
lowerCamelCase_ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.414 , time_embedding_act_fn="""gelu""" , time_embedding_dim=3_2 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowerCamelCase_ = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
lowerCamelCase_ = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
lowerCamelCase_ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def lowercase__ ( self : List[Any] ):
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = self.pipeline_class(**__UpperCamelCase )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowerCamelCase_ = self.get_dummy_inputs(__UpperCamelCase )
lowerCamelCase_ = inputs["""prompt"""]
lowerCamelCase_ = inputs["""generator"""]
lowerCamelCase_ = inputs["""num_inference_steps"""]
lowerCamelCase_ = inputs["""output_type"""]
if "image" in inputs:
lowerCamelCase_ = inputs["""image"""]
else:
lowerCamelCase_ = None
if "mask_image" in inputs:
lowerCamelCase_ = inputs["""mask_image"""]
else:
lowerCamelCase_ = None
if "original_image" in inputs:
lowerCamelCase_ = inputs["""original_image"""]
else:
lowerCamelCase_ = None
lowerCamelCase_ , lowerCamelCase_ = pipe.encode_prompt(__UpperCamelCase )
# inputs with prompt converted to embeddings
lowerCamelCase_ = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
lowerCamelCase_ = image
if mask_image is not None:
lowerCamelCase_ = mask_image
if original_image is not None:
lowerCamelCase_ = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ = pipe(**__UpperCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__UpperCamelCase )
lowerCamelCase_ = self.pipeline_class.from_pretrained(__UpperCamelCase )
pipe_loaded.to(__UpperCamelCase )
pipe_loaded.set_progress_bar_config(disable=__UpperCamelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__UpperCamelCase , __UpperCamelCase ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
lowerCamelCase_ = self.get_dummy_inputs(__UpperCamelCase )
lowerCamelCase_ = inputs["""generator"""]
lowerCamelCase_ = inputs["""num_inference_steps"""]
lowerCamelCase_ = inputs["""output_type"""]
# inputs with prompt converted to embeddings
lowerCamelCase_ = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
lowerCamelCase_ = image
if mask_image is not None:
lowerCamelCase_ = mask_image
if original_image is not None:
lowerCamelCase_ = original_image
lowerCamelCase_ = pipe_loaded(**__UpperCamelCase )[0]
lowerCamelCase_ = np.abs(to_np(__UpperCamelCase ) - to_np(__UpperCamelCase ) ).max()
self.assertLess(__UpperCamelCase , 1E-4 )
def lowercase__ ( self : Optional[int] ):
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = self.pipeline_class(**__UpperCamelCase )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowerCamelCase_ = self.get_dummy_inputs(__UpperCamelCase )
lowerCamelCase_ = pipe(**__UpperCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__UpperCamelCase )
lowerCamelCase_ = self.pipeline_class.from_pretrained(__UpperCamelCase )
pipe_loaded.to(__UpperCamelCase )
pipe_loaded.set_progress_bar_config(disable=__UpperCamelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
lowerCamelCase_ = self.get_dummy_inputs(__UpperCamelCase )
lowerCamelCase_ = pipe_loaded(**__UpperCamelCase )[0]
lowerCamelCase_ = np.abs(to_np(__UpperCamelCase ) - to_np(__UpperCamelCase ) ).max()
self.assertLess(__UpperCamelCase , 1E-4 )
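# Illustrative sketch (standalone): the save/reload assertions above reduce to
# comparing two output arrays by their maximum absolute difference.
def _max_abs_diff(a, b):
    return np.abs(np.asarray(a) - np.asarray(b)).max()

assert _max_abs_diff([1.0, 2.0], [1.0, 2.00005]) < 1e-4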
| 272 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {'do_clean_text': False, 'add_prefix_space': False}
def __UpperCamelCase ( self : Tuple ) -> Any:
super().setUp()
# fmt: off
A = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
# fmt: on
A = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀
A = {'unk_token': '<unk>'}
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.emoji_file , 'w' ) as emoji_writer:
emoji_writer.write(json.dumps(__UpperCamelCase ) )
def __UpperCamelCase ( self : Any , **__UpperCamelCase : Any ) -> Tuple:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def __UpperCamelCase ( self : str , __UpperCamelCase : Dict ) -> List[str]:
A = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
A = 'こんにちは、世界。 \nこんばんは、世界。😀'
return input_text, output_text
def __UpperCamelCase ( self : str , __UpperCamelCase : Optional[Any] ) -> List[str]:
A , A = self.get_input_output_texts(__UpperCamelCase )
A = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
A = tokenizer.decode(__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase )
return text, ids
def __UpperCamelCase ( self : Any ) -> int:
pass # TODO add if relevant
def __UpperCamelCase ( self : List[str] ) -> Tuple:
pass # TODO add if relevant
def __UpperCamelCase ( self : List[str] ) -> List[Any]:
pass # TODO add if relevant
def __UpperCamelCase ( self : List[str] ) -> int:
A = self.get_tokenizer()
# Testing tokenization
A = 'こんにちは、世界。 こんばんは、㔺界。'
A = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
A = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
# Testing conversion to ids without special tokens
A = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
A = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
# Testing conversion to ids with special tokens
A = tokens + [tokenizer.unk_token]
A = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
A = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
A = self.get_tokenizer()
# Testing tokenization
A = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
A = 'こんにちは、、、、世界。こんばんは、、、、世界。'
A = tokenizer.encode(__UpperCamelCase )
A = tokenizer.decode(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
@slow
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
A = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
A = 'こんにちは、世界。'
A = 'こんばんは、㔺界。😀'
A = 'こんにちは、世界。こんばんは、世界。😀'
A = tokenizer.encode(prefix_text + input_text )
A = tokenizer.encode('' , prefix_text=prefix_text + input_text )
A = tokenizer.encode(__UpperCamelCase , prefix_text=__UpperCamelCase )
A = tokenizer.decode(__UpperCamelCase )
A = tokenizer.decode(__UpperCamelCase )
A = tokenizer.decode(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
@slow
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
A = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
A = 'こんにちは、世界。'
A = 'こんばんは、㔺界。😀'
A = len(tokenizer.encode(__UpperCamelCase ) ) - 2
A = len(tokenizer.encode(__UpperCamelCase ) ) - 2
A = [1] + [0] * (len_prefix + len_text + 1)
A = [1] * (len_prefix + len_text + 1) + [0]
A = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
A = tokenizer(prefix_text + input_text ).token_type_ids
A = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
A = tokenizer(__UpperCamelCase , prefix_text=__UpperCamelCase ).token_type_ids
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
@slow
def __UpperCamelCase ( self : Any ) -> str:
A = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
A = tokenizer.encode('あンいワ' )
A = tokenizer.encode('' , prefix_text='あンいワ' )
A = tokenizer.encode('いワ' , prefix_text='あン' )
self.assertEqual(tokenizer.decode(__UpperCamelCase ) , tokenizer.decode(__UpperCamelCase ) )
self.assertEqual(tokenizer.decode(__UpperCamelCase ) , tokenizer.decode(__UpperCamelCase ) )
self.assertNotEqual(__UpperCamelCase , __UpperCamelCase )
self.assertNotEqual(__UpperCamelCase , __UpperCamelCase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def __UpperCamelCase ( self : Any ) -> List[Any]:
A = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
A = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
A = tokenizer(__UpperCamelCase , padding=__UpperCamelCase )
A = tokenizer.batch_encode_plus(__UpperCamelCase , padding=__UpperCamelCase )
# fmt: off
A = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
A = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
A = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __UpperCamelCase )
self.assertListEqual(x_token.token_type_ids , __UpperCamelCase )
self.assertListEqual(x_token.attention_mask , __UpperCamelCase )
self.assertListEqual(x_token_a.input_ids , __UpperCamelCase )
self.assertListEqual(x_token_a.token_type_ids , __UpperCamelCase )
self.assertListEqual(x_token_a.attention_mask , __UpperCamelCase )
def __UpperCamelCase ( self : Tuple ) -> str:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def __UpperCamelCase ( self : Dict ) -> int:
# tokenizer has no padding token
        pass
| 106 | 0 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser ):
    '''simple docstring'''
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary(terminalreporter ):
    '''simple docstring'''
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("""--make-reports""" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
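# Illustrative sketch (standalone, plain argparse instead of pytest): the
# terminal-summary hook above reads a CLI option and only emits reports when
# the option was actually passed.
import argparse

_parser = argparse.ArgumentParser()
_parser.add_argument("--make-reports", default=False)
_args = _parser.parse_args([])  # no flag passed -> reports stay disabled
assert _args.make_reports is False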
| 96 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
_lowercase = logging.getLogger(__name__)
class NERTransformer ( BaseTransformer ):
    mode = '''token-classification'''
def __init__(self , __magic_name__ ):
if type(__magic_name__ ) == dict:
lowerCamelCase__ : Any = Namespace(**__magic_name__ )
lowerCamelCase__ : str = import_module("""tasks""" )
try:
lowerCamelCase__ : Optional[Any] = getattr(__magic_name__ , hparams.task_type )
lowerCamelCase__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" )
lowerCamelCase__ : Any = self.token_classification_task.get_labels(hparams.labels )
lowerCamelCase__ : Tuple = CrossEntropyLoss().ignore_index
super().__init__(__magic_name__ , len(self.labels ) , self.mode )
def _snake_case (self , **__magic_name__ ):
return self.model(**__magic_name__ )
def _snake_case (self , __magic_name__ , __magic_name__ ):
lowerCamelCase__ : Tuple = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
lowerCamelCase__ : Union[str, Any] = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
            ) # XLM and RoBERTa don't use token_type_ids
lowerCamelCase__ : List[str] = self(**__magic_name__ )
lowerCamelCase__ : Dict = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def _snake_case (self ):
lowerCamelCase__ : Dict = self.hparams
for mode in ["train", "dev", "test"]:
lowerCamelCase__ : List[str] = self._feature_file(__magic_name__ )
if os.path.exists(__magic_name__ ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , __magic_name__ )
lowerCamelCase__ : Union[str, Any] = torch.load(__magic_name__ )
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir )
lowerCamelCase__ : int = self.token_classification_task.read_examples_from_file(args.data_dir , __magic_name__ )
lowerCamelCase__ : Tuple = self.token_classification_task.convert_examples_to_features(
__magic_name__ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__magic_name__ , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("""Saving features into cached file %s""" , __magic_name__ )
torch.save(__magic_name__ , __magic_name__ )
def _snake_case (self , __magic_name__ , __magic_name__ , __magic_name__ = False ):
lowerCamelCase__ : Any = self._feature_file(__magic_name__ )
logger.info("""Loading features from cached file %s""" , __magic_name__ )
lowerCamelCase__ : Optional[Any] = torch.load(__magic_name__ )
lowerCamelCase__ : Tuple = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowerCamelCase__ : Optional[Any] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
lowerCamelCase__ : str = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
lowerCamelCase__ : int = torch.tensor([0 for f in features] , dtype=torch.long )
        # HACK(this will not be used anymore soon)
lowerCamelCase__ : Any = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) , batch_size=__magic_name__ )
def _snake_case (self , __magic_name__ , __magic_name__ ):
"""Compute validation""" ""
lowerCamelCase__ : Optional[int] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
lowerCamelCase__ : Tuple = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
            ) # XLM and RoBERTa don't use token_type_ids
lowerCamelCase__ : str = self(**__magic_name__ )
lowerCamelCase__ ,lowerCamelCase__ : List[Any] = outputs[:2]
lowerCamelCase__ : List[Any] = logits.detach().cpu().numpy()
lowerCamelCase__ : Dict = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _snake_case (self , __magic_name__ ):
lowerCamelCase__ : List[str] = torch.stack([x["""val_loss"""] for x in outputs] ).mean()
lowerCamelCase__ : Optional[int] = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
lowerCamelCase__ : List[str] = np.argmax(__magic_name__ , axis=2 )
lowerCamelCase__ : Optional[Any] = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
lowerCamelCase__ : Optional[int] = dict(enumerate(self.labels ) )
lowerCamelCase__ : List[str] = [[] for _ in range(out_label_ids.shape[0] )]
lowerCamelCase__ : Any = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
lowerCamelCase__ : Tuple = {
"""val_loss""": val_loss_mean,
"""accuracy_score""": accuracy_score(__magic_name__ , __magic_name__ ),
"""precision""": precision_score(__magic_name__ , __magic_name__ ),
"""recall""": recall_score(__magic_name__ , __magic_name__ ),
"""f1""": fa_score(__magic_name__ , __magic_name__ ),
}
lowerCamelCase__ : Dict = dict(results.items() )
lowerCamelCase__ : str = results
return ret, preds_list, out_label_list
def _snake_case (self , __magic_name__ ):
# when stable
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ : str = self._eval_end(__magic_name__ )
lowerCamelCase__ : Union[str, Any] = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _snake_case (self , __magic_name__ ):
# updating to test_epoch_end instead of deprecated test_end
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ : Union[str, Any] = self._eval_end(__magic_name__ )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
lowerCamelCase__ : List[Any] = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _snake_case (__magic_name__ , __magic_name__ ):
# Add NER specific options
BaseTransformer.add_model_specific_args(__magic_name__ , __magic_name__ )
parser.add_argument(
"""--task_type""" , default="""NER""" , type=__magic_name__ , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" )
parser.add_argument(
"""--max_seq_length""" , default=128 , type=__magic_name__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--labels""" , default="""""" , type=__magic_name__ , help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=__magic_name__ , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
return parser
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
_lowercase = NERTransformer.add_model_specific_args(parser, os.getcwd())
_lowercase = parser.parse_args()
_lowercase = NERTransformer(args)
_lowercase = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
_lowercase = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
_lowercase = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
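# Illustrative sketch (standalone toy data): the realignment loop in `_eval_end`
# drops every position whose gold id equals the ignore index before mapping ids
# back to label strings.
_pad_id = -100
_label_map = {0: "O", 1: "B-PER"}
_gold = np.array([[0, 1, _pad_id]])
_pred = np.array([[0, 0, 1]])
_gold_list = [[] for _ in range(_gold.shape[0])]
_pred_list = [[] for _ in range(_gold.shape[0])]
for _i in range(_gold.shape[0]):
    for _j in range(_gold.shape[1]):
        if _gold[_i, _j] != _pad_id:
            _gold_list[_i].append(_label_map[int(_gold[_i][_j])])
            _pred_list[_i].append(_label_map[int(_pred[_i][_j])])
assert _gold_list == [["O", "B-PER"]] and _pred_list == [["O", "O"]]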
| 96 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class _a ( TaskTemplate ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task : str = field(default="""summarization""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
    input_schema : ClassVar[Features] = Features({"""text""": Value("""string""" )} )
    label_schema : ClassVar[Features] = Features({"""summary""": Value("""string""" )} )
    text_column : str = "text"
    summary_column : str = "summary"
@property
def _lowercase ( self ) -> Dict:
return {self.text_column: "text", self.summary_column: "summary"}
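# Illustrative sketch (`ToySummarization` is a demo class, not part of the
# library): a frozen template pairing per-instance column names with the same
# mapping property shown above.
@dataclass(frozen=True)
class ToySummarization:
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}

assert ToySummarization().column_mapping == {"text": "text", "summary": "summary"}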
| 185 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __a ( ConfigTester ):
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "num_attention_heads" ) )
class LevitModelTester :
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple=13 , SCREAMING_SNAKE_CASE : List[Any]=64 , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : str=3 , SCREAMING_SNAKE_CASE : Optional[Any]=2 , SCREAMING_SNAKE_CASE : List[Any]=1 , SCREAMING_SNAKE_CASE : List[Any]=16 , SCREAMING_SNAKE_CASE : Tuple=[1_28, 2_56, 3_84] , SCREAMING_SNAKE_CASE : Tuple=[4, 6, 8] , SCREAMING_SNAKE_CASE : Dict=[2, 3, 4] , SCREAMING_SNAKE_CASE : Any=[16, 16, 16] , SCREAMING_SNAKE_CASE : List[str]=0 , SCREAMING_SNAKE_CASE : List[Any]=[2, 2, 2] , SCREAMING_SNAKE_CASE : int=[2, 2, 2] , SCREAMING_SNAKE_CASE : str=0.0_2 , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : int=2 , ):
'''simple docstring'''
UpperCamelCase__ : Dict = parent
UpperCamelCase__ : Any = batch_size
UpperCamelCase__ : str = image_size
UpperCamelCase__ : Dict = num_channels
UpperCamelCase__ : str = kernel_size
UpperCamelCase__ : str = stride
UpperCamelCase__ : int = padding
UpperCamelCase__ : int = hidden_sizes
UpperCamelCase__ : Dict = num_attention_heads
UpperCamelCase__ : int = depths
UpperCamelCase__ : Optional[Any] = key_dim
UpperCamelCase__ : Union[str, Any] = drop_path_rate
UpperCamelCase__ : List[str] = patch_size
UpperCamelCase__ : str = attention_ratio
UpperCamelCase__ : int = mlp_ratio
UpperCamelCase__ : Optional[int] = initializer_range
UpperCamelCase__ : Union[str, Any] = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
UpperCamelCase__ : str = is_training
UpperCamelCase__ : int = use_labels
UpperCamelCase__ : List[str] = num_labels
UpperCamelCase__ : int = initializer_range
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ : List[str] = None
if self.use_labels:
UpperCamelCase__ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase__ : List[str] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Tuple ):
'''simple docstring'''
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def __lowercase ( self : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = LevitModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ : Any = model(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = (self.image_size, self.image_size)
UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = image_size[0], image_size[1]
for _ in range(4 ):
UpperCamelCase__ : List[Any] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
UpperCamelCase__ : int = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def __lowercase ( self : int , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = self.num_labels
UpperCamelCase__ : Optional[Any] = LevitForImageClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase__ : str = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCamelCase__ : Tuple = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : str = config_and_inputs
UpperCamelCase__ : List[str] = {"pixel_values": pixel_values}
return config, inputs_dict
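# Illustrative check (standalone, using the tester defaults kernel=3, stride=2,
# padding=1 above): out = floor((in + 2*padding - kernel) / stride) + 1, applied
# four times to the 64-pixel input.
_size = 64
for _ in range(4):
    _size = floor((_size + 2 * 1 - 3) / 2) + 1
assert _size == 4  # 64 -> 32 -> 16 -> 8 -> 4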
@require_torch
class __a ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_lowerCAmelCase : str = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
_lowerCAmelCase : List[str] = (
{
'''feature-extraction''': LevitModel,
'''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
_lowerCAmelCase : Optional[Any] = False
_lowerCAmelCase : Optional[Any] = False
_lowerCAmelCase : Any = False
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : List[Any] = False
def __lowercase ( self : Any ):
'''simple docstring'''
UpperCamelCase__ : List[Any] = LevitModelTester(self )
UpperCamelCase__ : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowercase ( self : str ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowercase ( self : List[str] ):
'''simple docstring'''
return
@unittest.skip(reason="Levit does not use inputs_embeds" )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="Levit does not output attentions" )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : str ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : int = [*signature.parameters.keys()]
UpperCamelCase__ : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
def check_hidden_states_output(SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] ):
UpperCamelCase__ : Dict = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
UpperCamelCase__ : int = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ : int = outputs.hidden_states
UpperCamelCase__ : List[Any] = len(self.model_tester.depths ) + 1
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = (self.model_tester.image_size, self.model_tester.image_size)
UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = image_size[0], image_size[1]
for _ in range(4 ):
UpperCamelCase__ : int = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
UpperCamelCase__ : Any = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ : List[Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : Tuple , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any]=False ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __lowercase ( self : Any ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
def __lowercase ( self : List[str] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCamelCase__ , UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ : Optional[int] = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(SCREAMING_SNAKE_CASE )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
UpperCamelCase__ : Dict = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.train()
UpperCamelCase__ : int = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __lowercase ( self : int ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCamelCase__ : Union[str, Any] = False
UpperCamelCase__ : Dict = True
for model_class in self.all_model_classes:
if model_class in get_values(SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
UpperCamelCase__ : int = model_class(SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.to(SCREAMING_SNAKE_CASE )
model.train()
UpperCamelCase__ : Tuple = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __lowercase ( self : int ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ : Optional[int] = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(SCREAMING_SNAKE_CASE ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}' ):
UpperCamelCase__ : Optional[int] = problem_type["title"]
UpperCamelCase__ : Tuple = problem_type["num_labels"]
UpperCamelCase__ : Tuple = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.train()
UpperCamelCase__ : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
if problem_type["num_labels"] > 1:
UpperCamelCase__ : Optional[int] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
UpperCamelCase__ : Tuple = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=SCREAMING_SNAKE_CASE ) as warning_list:
UpperCamelCase__ : Any = model(**SCREAMING_SNAKE_CASE ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'Something is going wrong in the regression problem: intercepted {w.message}' )
loss.backward()
@slow
def __lowercase ( self : Dict ):
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Union[str, Any] = LevitModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
UpperCamelCase__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __a ( unittest.TestCase ):
@cached_property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCamelCase__ : Tuple = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = self.default_image_processor
UpperCamelCase__ : List[str] = prepare_img()
UpperCamelCase__ : Any = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCamelCase__ : List[Any] = model(**SCREAMING_SNAKE_CASE )
# verify the logits
UpperCamelCase__ : Tuple = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = torch.tensor([1.0_4_4_8, -0.3_7_4_5, -1.8_3_1_7] ).to(SCREAMING_SNAKE_CASE )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 228 | 0 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
__lowerCamelCase = logging.getLogger(__name__)
class NER (TokenClassificationTask ):
"""simple docstring"""
def __init__( self ,lowercase=-1):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = label_idx
def A_ ( self ,lowercase ,lowercase):
"""simple docstring"""
if isinstance(_A ,_A):
UpperCAmelCase_ : Any = mode.value
UpperCAmelCase_ : Optional[Any] = os.path.join(_A ,F"""{mode}.txt""")
UpperCAmelCase_ : Optional[Any] = 1
UpperCAmelCase_ : Tuple = []
with open(_A ,encoding="utf-8") as f:
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Any = []
for line in f:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F"""{mode}-{guid_index}""" ,words=_A ,labels=_A))
guid_index += 1
UpperCAmelCase_ : str = []
UpperCAmelCase_ : int = []
else:
UpperCAmelCase_ : int = line.split(" ")
words.append(splits[0])
if len(_A) > 1:
labels.append(splits[self.label_idx].replace("\n" ,""))
else:
# Examples could have no label for mode = "test"
labels.append("O")
if words:
examples.append(InputExample(guid=F"""{mode}-{guid_index}""" ,words=_A ,labels=_A))
return examples
def A_ ( self ,lowercase ,lowercase ,lowercase):
"""simple docstring"""
UpperCAmelCase_ : int = 0
for line in test_input_reader:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
writer.write(_A)
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
UpperCAmelCase_ : int = line.split()[0] + ' ' + preds_list[example_id].pop(0) + '\n'
writer.write(_A)
else:
logger.warning("Maximum sequence length exceeded: No prediction for \'%s\'." ,line.split()[0])
def A_ ( self ,lowercase):
"""simple docstring"""
if path:
with open(_A ,"r") as f:
UpperCAmelCase_ : Any = f.read().splitlines()
if "O" not in labels:
UpperCAmelCase_ : Optional[int] = ['O'] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk (NER ):
"""simple docstring"""
def __init__( self):
"""simple docstring"""
super().__init__(label_idx=-2)
def A_ ( self ,lowercase):
"""simple docstring"""
if path:
with open(_A ,"r") as f:
UpperCAmelCase_ : Union[str, Any] = f.read().splitlines()
if "O" not in labels:
UpperCAmelCase_ : Any = ['O'] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS (TokenClassificationTask ):
"""simple docstring"""
def A_ ( self ,lowercase ,lowercase):
"""simple docstring"""
if isinstance(_A ,_A):
UpperCAmelCase_ : Dict = mode.value
UpperCAmelCase_ : Dict = os.path.join(_A ,F"""{mode}.txt""")
UpperCAmelCase_ : List[str] = 1
UpperCAmelCase_ : str = []
with open(_A ,encoding="utf-8") as f:
for sentence in parse_incr(_A):
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : Any = []
for token in sentence:
words.append(token["form"])
labels.append(token["upos"])
assert len(_A) == len(_A)
if words:
examples.append(InputExample(guid=F"""{mode}-{guid_index}""" ,words=_A ,labels=_A))
guid_index += 1
return examples
def A_ ( self ,lowercase ,lowercase ,lowercase):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = 0
for sentence in parse_incr(_A):
UpperCAmelCase_ : str = preds_list[example_id]
UpperCAmelCase_ : Tuple = ''
for token in sentence:
out += F"""{token['form']} ({token['upos']}|{s_p.pop(0)}) """
out += "\n"
writer.write(_A)
example_id += 1
def A_ ( self ,lowercase):
"""simple docstring"""
if path:
with open(_A ,"r") as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
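# Illustrative sketch (standalone): the CoNLL-style reading loop above, reduced
# to plain strings - blank lines and "-DOCSTART-" markers close a sentence.
_lines = ["-DOCSTART- -X- O O", "", "EU B-ORG", "rejects O", "", "German B-MISC"]
_examples, _words, _labels = [], [], []
for _line in _lines + [""]:
    if _line.startswith("-DOCSTART-") or _line.strip() == "":
        if _words:
            _examples.append((_words, _labels))
            _words, _labels = [], []
    else:
        _token, _label = _line.split(" ")
        _words.append(_token)
        _labels.append(_label)
assert _examples == [(["EU", "rejects"], ["B-ORG", "O"]), (["German"], ["B-MISC"])]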
| 716 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class snake_case_ (TensorFormatter[Mapping, """torch.Tensor""", Mapping] ):
"""simple docstring"""
def __init__( self ,lowercase=None ,**lowercase):
"""simple docstring"""
super().__init__(features=lowercase)
UpperCAmelCase_ : Union[str, Any] = torch_tensor_kwargs
import torch # noqa import torch at initialization
def A_ ( self ,lowercase):
"""simple docstring"""
import torch
if isinstance(lowercase ,lowercase) and column:
if all(
isinstance(lowercase ,torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column):
return torch.stack(lowercase)
return column
def A_ ( self ,lowercase):
"""simple docstring"""
import torch
        if isinstance(lowercase ,(str, bytes, type(None))):
return value
elif isinstance(lowercase ,(np.character, np.ndarray)) and np.issubdtype(value.dtype ,np.character):
return value.tolist()
UpperCAmelCase_ : List[str] = {}
if isinstance(lowercase ,(np.number, np.ndarray)) and np.issubdtype(value.dtype ,np.integer):
            UpperCAmelCase_ : Dict = {"dtype": torch.int64}
        elif isinstance(lowercase ,(np.number, np.ndarray)) and np.issubdtype(value.dtype ,np.floating):
            UpperCAmelCase_ : Optional[Any] = {"dtype": torch.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(lowercase ,PIL.Image.Image):
UpperCAmelCase_ : List[str] = np.asarray(lowercase)
return torch.tensor(lowercase ,**{**default_dtype, **self.torch_tensor_kwargs})
def A_ ( self ,lowercase):
"""simple docstring"""
import torch
# support for torch, tf, jax etc.
if hasattr(lowercase ,"__array__") and not isinstance(lowercase ,torch.Tensor):
UpperCAmelCase_ : int = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(lowercase ,np.ndarray):
            if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(lowercase) for substruct in data_struct])
elif isinstance(lowercase ,(list, tuple)):
return self._consolidate([self.recursive_tensorize(lowercase) for substruct in data_struct])
return self._tensorize(lowercase)
def A_ ( self ,lowercase):
"""simple docstring"""
return map_nested(self._recursive_tensorize ,lowercase ,map_list=lowercase)
def A_ ( self ,lowercase):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.numpy_arrow_extractor().extract_row(lowercase)
UpperCAmelCase_ : Tuple = self.python_features_decoder.decode_row(lowercase)
return self.recursive_tensorize(lowercase)
def A_ ( self ,lowercase):
"""simple docstring"""
UpperCAmelCase_ : str = self.numpy_arrow_extractor().extract_column(lowercase)
UpperCAmelCase_ : List[Any] = self.python_features_decoder.decode_column(lowercase ,pa_table.column_names[0])
UpperCAmelCase_ : List[Any] = self.recursive_tensorize(lowercase)
UpperCAmelCase_ : List[str] = self._consolidate(lowercase)
return column
def A_ ( self ,lowercase):
"""simple docstring"""
UpperCAmelCase_ : Any = self.numpy_arrow_extractor().extract_batch(lowercase)
UpperCAmelCase_ : Optional[int] = self.python_features_decoder.decode_batch(lowercase)
UpperCAmelCase_ : List[Any] = self.recursive_tensorize(lowercase)
for column_name in batch:
UpperCAmelCase_ : Optional[int] = self._consolidate(batch[column_name])
return batch
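# Illustrative check (standalone; assumes torch is installed): the dtype routing
# in `_tensorize` maps integer arrays to int64 tensors and floating arrays to
# float32 tensors.
import torch

def _route_dtype(value):
    value = np.asarray(value)
    if np.issubdtype(value.dtype, np.integer):
        return torch.int64
    if np.issubdtype(value.dtype, np.floating):
        return torch.float32
    return None

assert _route_dtype(np.array([1, 2])) is torch.int64
assert _route_dtype(np.array([1.0])) is torch.float32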
| 455 | 0 |
'''simple docstring'''
def reverse_words( input_str: str ) -> str:
    """
    Reverses the order of words in the given string.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369 |
import re
def is_sri_lankan_phone_number( phone: str ) -> bool:
    """simple docstring"""
    pattern = re.compile(
        R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' )
    return bool(re.search(pattern , phone ) )
if __name__ == "__main__":
    phone = """0094702343221"""
print(is_sri_lankan_phone_number(phone))
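    # Quick illustrative checks: a valid mobile number matches, a malformed one
    # (trailing letter) does not.
    assert is_sri_lankan_phone_number("+94773283048")
    assert not is_sri_lankan_phone_number("0773283048a")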
| 202 | 0 |
from torch import nn
def get_activation( act_fn ):
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f"""Unsupported activation function: {act_fn}""" )
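# Illustrative usage (assumes torch is installed): config strings map to
# torch.nn activation modules.
assert isinstance(get_activation("gelu"), nn.GELU)
assert isinstance(get_activation("swish"), nn.SiLU)
assert isinstance(get_activation("mish"), nn.Mish)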
| 71 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
_lowerCAmelCase = logging.getLogger(__name__)
@dataclass
class ModelArguments :
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."} )
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class DataTrainingArguments :
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
    task: Optional[str] = field(
        default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"}, )
    max_source_length: Optional[int] = field(
        default=1024, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    max_target_length: Optional[int] = field(
        default=128, metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    val_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        }, )
    test_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."} )
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."} )
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."} )
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."} )
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."} )
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."} )
    ignore_pad_token_for_loss: bool = field(
        default=True, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."}, )
def handle_metrics( split , metrics , output_dir ):
    logger.info(f"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(f"""  {key} = {metrics[key]}""" )
    save_json(metrics , os.path.join(output_dir , f"""{split}_results.json""" ) )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
    logger.info('''Training/evaluation parameters %s''' , training_args )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    extra_model_params = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
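    # any of these regularization overrides set on the parsed training arguments are copied onto the model config below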
for p in extra_model_params:
        if getattr(training_args , p , None ):
            assert hasattr(config , p ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(config , p , getattr(training_args , p ) )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )
# use task specific params
    use_task_specific_params(model , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
# set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer , MBartTokenizer ):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
        freeze_embeds(model )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
    dataset_class = SeqaSeqDataset
    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task , tokenizer ) if training_args.predict_with_generate else None
    )
    trainer = SeqaSeqTrainer(
        model=model , args=training_args , data_args=data_args , train_dataset=train_dataset , eval_dataset=eval_dataset , data_collator=SeqaSeqDataCollator(
            tokenizer , data_args , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=compute_metrics_fn , tokenizer=tokenizer , )
    all_metrics = {}
# Training
    if training_args.do_train:
        logger.info('''*** Train ***''' )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics['''train_n_objs'''] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics('''train''' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate(metric_key_prefix='''val''' )
        metrics['''val_n_objs'''] = data_args.n_val
        metrics['''val_loss'''] = round(metrics['''val_loss'''] , 4 )
        if trainer.is_world_process_zero():
            handle_metrics('''val''' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
    if training_args.do_predict:
        logger.info('''*** Predict ***''' )
        test_output = trainer.predict(test_dataset=test_dataset , metric_key_prefix='''test''' )
        metrics = test_output.metrics
        metrics['''test_n_objs'''] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics['''test_loss'''] = round(metrics['''test_loss'''] , 4 )
            handle_metrics('''test''' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=True , clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip , test_preds )
                write_txt_file(test_preds , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
    if trainer.is_world_process_zero():
        save_json(all_metrics , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def _mp_fn( index ):  # entry-point name assumed for xla_spawn
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 71 | 1 |
def __UpperCamelCase (num: int ) -> bool:
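    """
    Return True if num reads the same forwards and backwards.
    Illustrative doctests (added) so the testmod() call below exercises the function:

    >>> __UpperCamelCase(121)
    True
    >>> __UpperCamelCase(-121)
    False
    >>> __UpperCamelCase(10)
    False
    """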
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 235 |
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> List[Any]:
snake_case_ : int = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "embed_dim" ) )
self.parent.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "num_heads" ) )
class UpperCAmelCase_ :
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , image_size=64 , num_channels=3 , embed_dim=[16, 48, 96] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , stride_kv=[2, 2, 2] , cls_token=[False, False, True] , attention_drop_rate=[0.0, 0.0, 0.0] , initializer_range=0.02 , layer_norm_eps=1e-12 , is_training=True , use_labels=True , num_labels=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
def _lowerCAmelCase ( self ) -> str:
snake_case_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : Optional[Any] = None
if self.use_labels:
snake_case_ : Tuple = ids_tensor([self.batch_size] , self.num_labels )
snake_case_ : str = self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase ( self ) -> Any:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
snake_case_ : Optional[Any] = CvtModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
snake_case_ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
snake_case_ : List[Any] = (self.image_size, self.image_size)
snake_case_ , snake_case_ : Any = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
snake_case_ : Optional[int] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
snake_case_ : Optional[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
snake_case_ : List[Any] = self.num_labels
snake_case_ : int = CvtForImageClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
snake_case_ : int = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self ) -> str:
snake_case_ : str = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ : List[Any] = config_and_inputs
snake_case_ : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A : Optional[Any] = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
A : List[Any] = (
{'feature-extraction': CvtModel, 'image-classification': CvtForImageClassification}
if is_torch_available()
else {}
)
A : str = False
A : List[Any] = False
A : Union[str, Any] = False
A : Optional[int] = False
A : Optional[int] = False
def _lowerCAmelCase ( self ) -> Dict:
snake_case_ : Optional[int] = CvtModelTester(self )
snake_case_ : Any = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def _lowerCAmelCase ( self ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCAmelCase ( self ) -> int:
return
@unittest.skip(reason="Cvt does not output attentions" )
def _lowerCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip(reason="Cvt does not use inputs_embeds" )
def _lowerCAmelCase ( self ) -> str:
pass
@unittest.skip(reason="Cvt does not support input and output embeddings" )
def _lowerCAmelCase ( self ) -> Dict:
pass
def _lowerCAmelCase ( self ) -> List[str]:
snake_case_ , snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : List[str] = model_class(_SCREAMING_SNAKE_CASE )
snake_case_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : str = [*signature.parameters.keys()]
snake_case_ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Any:
snake_case_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
def check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ : Dict = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
snake_case_ : List[Any] = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
snake_case_ : Union[str, Any] = outputs.hidden_states
snake_case_ : List[str] = len(self.model_tester.depth )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
snake_case_ , snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Optional[Any] = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ : int = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> str:
snake_case_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowerCAmelCase ( self ) -> Any:
pass
@slow
def _lowerCAmelCase ( self ) -> Optional[int]:
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : Optional[Any] = CvtModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def lowerCAmelCase__ ( ):
snake_case_ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowerCAmelCase ( self ) -> int:
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _lowerCAmelCase ( self ) -> int:
snake_case_ : Tuple = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = self.default_image_processor
snake_case_ : Optional[Any] = prepare_img()
snake_case_ : List[str] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case_ : Union[str, Any] = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
snake_case_ : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
snake_case_ : int = torch.tensor([0.9285, 0.9015, -0.3150] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 568 | 0 |
'''simple docstring'''
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float] , allow_empty_subarrays: bool = False ) -> float:
    """
    Return the maximum sum over all contiguous subarrays of arr (Kadane's algorithm).

    >>> max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4])
    6
    """
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float('''-inf''' )
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num , curr_sum + num )
        max_sum = max(max_sum , curr_sum )
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f"""{max_subarray_sum(nums) = }""")
| 418 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
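# each test below takes the current process state and checks one collective op across processes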
def create_tensor(state ):
    """simple docstring"""
    return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def test_gather(state ):
    """simple docstring"""
    tensor = create_tensor(state )
    gathered_tensor = gather(tensor )
    assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def test_gather_object(state ):
    """simple docstring"""
    obj = [state.process_index]
    gathered_obj = gather_object(obj )
    assert len(gathered_obj ) == state.num_processes, F"""{gathered_obj}, {len(gathered_obj )} != {state.num_processes}"""
    assert gathered_obj == list(range(state.num_processes ) ), F"""{gathered_obj} != {list(range(state.num_processes ) )}"""
def test_broadcast(state ):
    """simple docstring"""
    tensor = create_tensor(state )
    broadcasted_tensor = broadcast(tensor )
    assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
    assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def test_pad_across_processes(state ):
    """simple docstring"""
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1 ).to(state.device )
    else:
        tensor = torch.arange(state.num_processes ).to(state.device )
    padded_tensor = pad_across_processes(tensor )
    assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def test_reduce_sum(state ):
    """simple docstring"""
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state )
    reduced_tensor = reduce(tensor , '''sum''' )
    truth_tensor = torch.tensor([4.0, 6] ).to(state.device )
    assert torch.allclose(reduced_tensor , truth_tensor ), F"""{reduced_tensor} != {truth_tensor}"""
def test_reduce_mean(state ):
    """simple docstring"""
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state )
    reduced_tensor = reduce(tensor , '''mean''' )
    truth_tensor = torch.tensor([2.0, 3] ).to(state.device )
    assert torch.allclose(reduced_tensor , truth_tensor ), F"""{reduced_tensor} != {truth_tensor}"""
def _mp_fn(index ):  # entry-point name assumed for xla_spawn
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()
def main():
    """simple docstring"""
    state = PartialState()
    state.print(F"""State: {state}""" )
    state.print('''testing gather''' )
    test_gather(state )
    state.print('''testing gather_object''' )
    test_gather_object(state )
    state.print('''testing broadcast''' )
    test_broadcast(state )
    state.print('''testing pad_across_processes''' )
    test_pad_across_processes(state )
    state.print('''testing reduce_sum''' )
    test_reduce_sum(state )
    state.print('''testing reduce_mean''' )
    test_reduce_mean(state )
if __name__ == "__main__":
main()
| 418 | 1 |
import math
import sys
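# Lempel-Ziv (LZW) decompression helpers: read a file as a bit string, strip its size prefix,
# rebuild the payload with an adaptive dictionary, then write the result back as bytes.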
def read_file_binary( file_path : str ) -> str:
    '''simple docstring'''
    result = ''
    try:
        with open(file_path , 'rb' ) as binary_file:
            data = binary_file.read()
            for dat in data:
                curr_byte = F'''{dat:08b}'''
                result += curr_byte
            return result
    except OSError:
        print('File not accessible' )
        sys.exit()
def decompress_data( data_bits : str ) -> str:
    '''simple docstring'''
    lexicon = {'0': '0', '1': '1'}
    result , curr_string = '', ''
    index = len(lexicon )
    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        # the matched code grows a trailing 0; its sibling (code + '1') is registered below
        lexicon[curr_string] = last_match_id + '0'
        if math.log2(index ).is_integer():
            # dictionary size hit a power of two: every existing code gains one leading bit
            new_lex = {}
            for curr_key in list(lexicon ):
                new_lex['0' + curr_key] = lexicon.pop(curr_key )
            lexicon = new_lex
        lexicon[bin(index )[2:]] = last_match_id + '1'
        index += 1
        curr_string = ''
    return result
def write_file_binary( file_path : str , to_write : str ) -> None:
    '''simple docstring'''
    byte_length = 8
    try:
        with open(file_path , 'wb' ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append('10000000' )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder='big' ) )
    except OSError:
        print('File not accessible' )
        sys.exit()
def remove_prefix( data_bits : str ) -> str:
    '''simple docstring'''
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress( source_path : str , destination_path : str ) -> None:
    '''simple docstring'''
    data_bits = read_file_binary(source_path )
    data_bits = remove_prefix(data_bits )
    decompressed = decompress_data(data_bits )
    write_file_binary(destination_path , decompressed )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2]) | 106 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A( unittest.TestCase ):
"""simple docstring"""
@property
def _UpperCamelCase( self ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
_UpperCamelCase :str = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def _UpperCamelCase( self ) -> str:
"""simple docstring"""
_UpperCamelCase :Dict = self.dummy_uncond_unet
_UpperCamelCase :List[Any] = KarrasVeScheduler()
_UpperCamelCase :Optional[Any] = KarrasVePipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
_UpperCamelCase :Dict = torch.manual_seed(0 )
_UpperCamelCase :List[Any] = pipe(num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type='''numpy''' ).images
_UpperCamelCase :List[Any] = torch.manual_seed(0 )
_UpperCamelCase :Dict = pipe(num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type='''numpy''' , return_dict=SCREAMING_SNAKE_CASE__ )[0]
_UpperCamelCase :Dict = image[0, -3:, -3:, -1]
_UpperCamelCase :List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCamelCase :List[Any] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase :List[Any] = '''google/ncsnpp-celebahq-256'''
_UpperCamelCase :Any = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
_UpperCamelCase :Optional[int] = KarrasVeScheduler()
_UpperCamelCase :Optional[int] = KarrasVePipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
_UpperCamelCase :List[str] = torch.manual_seed(0 )
_UpperCamelCase :List[Any] = pipe(num_inference_steps=20 , generator=SCREAMING_SNAKE_CASE__ , output_type='''numpy''' ).images
_UpperCamelCase :Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
_UpperCamelCase :Any = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 355 | 0 |
from pathlib import Path
import fire
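# CLI helper: copies the first n lines of every file in src_dir into dest_dir (see minify below)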
def minify( src_dir : str , dest_dir : str , n : int ) -> None:
    """Write first n lines of each file f in src_dir to dest_dir/f"""
    src_dir = Path(src_dir )
    dest_dir = Path(dest_dir )
    dest_dir.mkdir(exist_ok=True )
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines() )][:n]
        dest_path = dest_dir.joinpath(path.name )
        print(dest_path )
        dest_path.open('w' ).write('\n'.join(new ) )
if __name__ == "__main__":
fire.Fire(minify)
| 703 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
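# lazy-import layout: only the names are registered here; the heavy modules are loaded on
# first attribute access through the _LazyModule instantiated at the bottom of the file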
__SCREAMING_SNAKE_CASE = {
'configuration_efficientformer': [
'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientFormerConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = ['EfficientFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientFormerForImageClassification',
'EfficientFormerForImageClassificationWithTeacher',
'EfficientFormerModel',
'EfficientFormerPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFEfficientFormerForImageClassification',
'TFEfficientFormerForImageClassificationWithTeacher',
'TFEfficientFormerModel',
'TFEfficientFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 153 | 0 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase__ :
    def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , num_labels=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowerCamelCase__: List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__: Dict = None
if self.use_labels:
lowerCamelCase__: List[str] = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase__: Union[str, Any] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowerCamelCase_ ( self : Union[str, Any] , __a : Union[str, Any] , __a : List[str] , __a : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__: List[str] = ConvNextModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase__: int = model(SCREAMING_SNAKE_CASE_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase_ ( self : List[Any] , __a : Optional[Any] , __a : Tuple , __a : Any ):
'''simple docstring'''
lowerCamelCase__: str = ConvNextForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase__: Optional[Any] = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : List[str] , __a : Tuple , __a : Optional[int] , __a : Tuple ):
'''simple docstring'''
lowerCamelCase__: List[str] = ConvNextBackbone(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase__: Union[str, Any] = model(SCREAMING_SNAKE_CASE_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase__: Tuple = None
lowerCamelCase__: Dict = ConvNextBackbone(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase__: Union[str, Any] = model(SCREAMING_SNAKE_CASE_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowerCamelCase__: Tuple = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: str = config_and_inputs
lowerCamelCase__: Optional[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
__lowerCamelCase = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
__lowerCamelCase = (
{"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification}
if is_torch_available()
else {}
)
__lowerCamelCase = True
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__: List[str] = ConvNextModelTester(self )
lowerCamelCase__: Optional[int] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__: Tuple = model_class(SCREAMING_SNAKE_CASE_ )
lowerCamelCase__: str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__: Any = [*signature.parameters.keys()]
lowerCamelCase__: Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowerCamelCase__: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowerCamelCase__: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*SCREAMING_SNAKE_CASE_ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
def check_hidden_states_output(__a : List[Any] , __a : List[Any] , __a : Optional[int] ):
lowerCamelCase__: str = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase__: Optional[int] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase__: List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase__: Dict = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase__ , lowerCamelCase__: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__: List[Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__: Optional[int] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__: str = ConvNextModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def __lowerCAmelCase ( ) -> List[Any]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowerCamelCase__: List[str] = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase__: str = self.default_image_processor
lowerCamelCase__: Union[str, Any] = prepare_img()
lowerCamelCase__: List[Any] = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
lowerCamelCase__: List[str] = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
lowerCamelCase__: Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase__: Tuple = torch.tensor([-0.0_260, -0.4_739, 0.1_911] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
@require_torch
class lowerCamelCase__ ( unittest.TestCase , UpperCAmelCase__ ):
__lowerCamelCase = (ConvNextBackbone,) if is_torch_available() else ()
__lowerCamelCase = ConvNextConfig
__lowerCamelCase = False
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowerCamelCase__: str = ConvNextModelTester(self )
| 306 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = MBartTokenizer
SCREAMING_SNAKE_CASE_ = MBartTokenizerFast
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
        tokenizer = MBartTokenizer(SAMPLE_VOCAB , keep_accents=True )
lowerCamelCase_ = tokenizer.tokenize('This is a test' )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCamelCase_ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCamelCase_ = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
lowerCamelCase_ = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=True
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it save with the same files
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=False
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 'facebook/mbart-large-en-ro'
SCREAMING_SNAKE_CASE_ = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
SCREAMING_SNAKE_CASE_ = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
SCREAMING_SNAKE_CASE_ = [82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2, EN_CODE]
@classmethod
def UpperCamelCase( cls ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
lowerCamelCase_ = 1
return cls
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 250020 )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
self.assertIn(SCREAMING_SNAKE_CASE_ , self.tokenizer.all_special_ids )
lowerCamelCase_ = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
lowerCamelCase_ = self.tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotIn(self.tokenizer.eos_token , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 10
lowerCamelCase_ = self.tokenizer(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [250026, 250001] )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = MBartTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , SCREAMING_SNAKE_CASE_ )
@require_torch
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
lowerCamelCase_ = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
lowerCamelCase_ = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
lowerCamelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(self.src_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=3 , return_tensors='pt' )
lowerCamelCase_ = self.tokenizer(
text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=10 , return_tensors='pt' )
lowerCamelCase_ = targets['input_ids']
lowerCamelCase_ = shift_tokens_right(SCREAMING_SNAKE_CASE_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , {
# A, test, EOS, en_XX
'input_ids': [[62, 3034, 2, 250004]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 250001,
} , )
| 42 | 0 |
'''simple docstring'''
def combination_util( arr , n , r , index , data , i ):
    if index == r:
        for j in range(r ):
            print(data[j] , end=''' ''' )
        print(''' ''' )
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr , n , r , index + 1 , data , i + 1 )
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr , n , r , index , data , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def lowerCAmelCase__ ( a_ : Union[str, Any] , a_ : str , a_ : int ) -> List[Any]:
# A temporary array to store all combination one by one
UpperCAmelCase__ : Optional[Any] = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(a_ , a_ , a_ , 0 , a_ , 0 )
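# Example: print_combination([10, 20, 30, 40, 50], 5, 3) prints all C(5, 3) = 10 size-3 combinations.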
if __name__ == "__main__":
# Driver code to check the function above
arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu | 718 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCamelCase_ = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 599 | 0 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
__A = """bart"""
__A = True
@st.cache(allow_output_mutation=_lowercase )
def UpperCamelCase__ ( ):
if LOAD_DENSE_INDEX:
snake_case : Tuple = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased" )
snake_case : Optional[Any] = AutoModel.from_pretrained("yjernite/retribert-base-uncased" ).to("cuda:0" )
snake_case : Optional[int] = qar_model.eval()
else:
snake_case : str = (None, None)
if MODEL_TYPE == "bart":
snake_case : Dict = AutoTokenizer.from_pretrained("yjernite/bart_eli5" )
snake_case : Optional[Any] = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5" ).to("cuda:0" )
snake_case : List[Any] = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth" )
sas_model.load_state_dict(save_dict["model"] )
snake_case : Dict = sas_model.eval()
else:
snake_case : str = make_qa_s2s_model(
model_name="t5-small" , from_file="seq2seq_models/eli5_t5_model_1024_4.pth" , device="cuda:0" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_lowercase )
def UpperCamelCase__ ( ):
if LOAD_DENSE_INDEX:
snake_case : str = faiss.StandardGpuResources()
snake_case : Optional[Any] = datasets.load_dataset(path="wiki_snippets" , name="wiki40b_en_100_0" )['train']
snake_case : Union[str, Any] = np.memmap(
"wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat" , dtype="float32" , mode="r" , shape=(wikiaab_passages.num_rows, 128) , )
snake_case : Tuple = faiss.IndexFlatIP(128 )
snake_case : Any = faiss.index_cpu_to_gpu(_lowercase , 1 , _lowercase )
wikiaab_gpu_index_flat.add(_lowercase ) # TODO fix for larger GPU
else:
snake_case : Union[str, Any] = (None, None)
snake_case : Optional[int] = Elasticsearch([{"host": "localhost", "port": "9200"}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_lowercase )
def UpperCamelCase__ ( ):
snake_case : int = datasets.load_dataset("eli5" , name="LFQA_reddit" )
snake_case : Dict = elia['train_eli5']
snake_case : Optional[int] = np.memmap(
"eli5_questions_reps.dat" , dtype="float32" , mode="r" , shape=(elia_train.num_rows, 128) )
snake_case : Union[str, Any] = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_lowercase )
return (elia_train, eli5_train_q_index)
__A = load_indexes()
__A = load_models()
__A = load_train_data()
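# Embed the query with the question encoder, then search the FAISS inner-product index over
# ELI5 training questions to fetch the most similar training examples.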
def UpperCamelCase__ ( lowercase__ : str , lowercase__ : Optional[int]=10 ):
snake_case : int = embed_questions_for_retrieval([question] , _lowercase , _lowercase )
snake_case : Dict = eli5_train_q_index.search(_lowercase , _lowercase )
snake_case : int = [elia_train[int(_lowercase )] for i in I[0]]
return nn_examples
def UpperCamelCase__ ( lowercase__ : int , lowercase__ : Tuple="wiki40b" , lowercase__ : List[str]="dense" , lowercase__ : int=10 ):
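# Build the supporting passages from either the dense (FAISS max inner product) or the
# sparse (ElasticSearch) index, then assemble the "question: ... context: ..." model input.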
if source == "none":
snake_case : Dict = (' <P> '.join(["" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
snake_case : int = query_qa_dense_index(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
else:
snake_case : Tuple = query_es_index(
_lowercase , _lowercase , index_name="english_wiki40b_snippets_100w" , n_results=_lowercase , )
snake_case : Tuple = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
snake_case : List[Any] = 'question: {} context: {}'.format(_lowercase , _lowercase )
return question_doc, support_list
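# torch tensors and tokenizers are not hashable by Streamlit's cache, so the decorator below
# maps them to a constant hash instead of hashing their contents.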
@st.cache(
hash_funcs={
torch.Tensor: (lambda lowercase__ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda lowercase__ : None),
} )
def UpperCamelCase__ ( lowercase__ : Optional[int] , lowercase__ : Dict , lowercase__ : List[str] , lowercase__ : List[Any]=64 , lowercase__ : List[Any]=256 , lowercase__ : str=False , lowercase__ : Union[str, Any]=2 , lowercase__ : Dict=0.95 , lowercase__ : List[Any]=0.8 ):
with torch.no_grad():
snake_case : Any = qa_s2s_generate(
_lowercase , _lowercase , _lowercase , num_answers=1 , num_beams=_lowercase , min_len=_lowercase , max_len=_lowercase , do_sample=_lowercase , temp=_lowercase , top_p=_lowercase , top_k=_lowercase , max_input_length=1024 , device="cuda:0" , )[0]
return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
__A = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
__A = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__A = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
__A = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
__A = st.sidebar.checkbox("Demo options")
if demo_options:
__A = st.sidebar.selectbox(
"",
action_list,
index=3,
)
__A = action_list.index(action_st)
__A = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
__A = show_type == """Show full text of passages"""
else:
__A = 3
__A = True
__A = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
__A = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
The answer is then generated by a sequence-to-sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
__A = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
__A = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
__A = """wiki40b"""
__A = """dense"""
__A = """beam"""
__A = 2
__A = 64
__A = 256
__A = None
__A = None
__A = st.sidebar.checkbox("Generation options")
if generate_options:
__A = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can decode with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
__A = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
__A = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__A = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__A = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__A = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__A = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__A = None
# start main text
__A = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
__A = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__A = st.text_input("Enter your question here:", "")
else:
__A = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
__A = make_support(question, source=wiki_source, method="dense", n_results=10)
__A = make_support(question, source=wiki_source, method="sparse", n_results=10)
__A = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__A = support_list[:10]
__A = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
__A = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__A = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
__A = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(" ", "_"))
__A = res[1].strip()
if sec_titles == "":
__A = """[{}]({})""".format(res[0], wiki_url)
else:
__A = sec_titles.split(" & ")
__A = """ & """.join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
__A = find_nearest_training(question)
__A = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
__A = [
"""{}. {}""".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
__A = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 134 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__UpperCamelCase: Optional[int] = logging.get_logger(__name__)
class __lowerCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
_A = ["audio_values", "audio_mask"]
def __init__( self: Any, lowerCamelCase_: int=2048, lowerCamelCase_: str=1, lowerCamelCase_: str=[16, 16], lowerCamelCase_: Union[str, Any]=128, lowerCamelCase_: Tuple=44100, lowerCamelCase_: Union[str, Any]=86, lowerCamelCase_: Optional[Any]=2048, lowerCamelCase_: Dict=0.0, **lowerCamelCase_: str, ):
super().__init__(
feature_size=lowerCamelCase_, sampling_rate=lowerCamelCase_, padding_value=lowerCamelCase_, **lowerCamelCase_, )
lowercase__ : List[str] = spectrogram_length
lowercase__ : Union[str, Any] = num_channels
lowercase__ : int = patch_size
lowercase__ : Union[str, Any] = feature_size // self.patch_size[1]
lowercase__ : Optional[int] = n_fft
lowercase__ : int = sampling_rate // hop_length_to_sampling_rate
lowercase__ : str = sampling_rate
lowercase__ : Union[str, Any] = padding_value
lowercase__ : Any = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2, num_mel_filters=lowerCamelCase_, min_frequency=0.0, max_frequency=2_2_0_5_0.0, sampling_rate=lowerCamelCase_, norm='slaney', mel_scale='slaney', ).T
def snake_case__( self: Tuple, lowerCamelCase_: np.array ):
lowercase__ : Optional[Any] = spectrogram(
lowerCamelCase_, window_function(self.n_fft, 'hann' ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='dB', db_range=8_0.0, )
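# Drop the trailing spectrogram frame, shift by -20 dB, then rescale so values lie in [-1.0, 1.0].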
lowercase__ : Optional[Any] = log_spec[:, :-1]
lowercase__ : int = log_spec - 2_0.0
lowercase__ : Dict = np.clip(log_spec / 4_0.0, -2.0, 0.0 ) + 1.0
return log_spec
def __call__( self: Dict, lowerCamelCase_: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], lowerCamelCase_: Optional[Union[str, TensorType]] = None, lowerCamelCase_: Optional[bool] = True, lowerCamelCase_: Optional[int] = None, lowerCamelCase_: bool = False, lowerCamelCase_: bool = False, **lowerCamelCase_: str, ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
F""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
F""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
lowercase__ : Any = isinstance(lowerCamelCase_, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
lowercase__ : Optional[Any] = is_batched_numpy or (
isinstance(lowerCamelCase_, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ : int = [np.asarray([speech], dtype=np.float32 ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase_, np.ndarray ):
lowercase__ : Optional[int] = np.asarray(lowerCamelCase_, dtype=np.float32 )
elif isinstance(lowerCamelCase_, np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
lowercase__ : str = raw_speech.astype(np.float32 )
# always return batch
if not is_batched:
lowercase__ : Tuple = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowercase__ : List[str] = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0], lowerCamelCase_ ):
lowercase__ : str = [np.asarray(lowerCamelCase_, dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
lowercase__ : Dict = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowercase__ : int = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
lowercase__ : Any = np.array(lowerCamelCase_ ).astype(np.float32 )
# convert into correct format for padding
lowercase__ : Any = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowercase__ : Tuple = np.ones([len(lowerCamelCase_ ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
lowercase__ : Dict = padded_audio_features * self.padding_value
for i in range(len(lowerCamelCase_ ) ):
lowercase__ : Union[str, Any] = audio_features[i]
lowercase__ : str = feature
# return as BatchFeature
if return_attention_mask:
lowercase__ : Dict = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
lowercase__ : int = {'audio_values': padded_audio_features}
lowercase__ : List[Any] = BatchFeature(data=lowerCamelCase_, tensor_type=lowerCamelCase_ )
return encoded_inputs
| 266 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
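# Fixed seeds and deterministic kernels keep the expected image slices asserted below stable across runs.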
class _lowerCAmelCase ( __a , __a , __a , unittest.TestCase ):
_lowercase =AltDiffusionPipeline
_lowercase =TEXT_TO_IMAGE_PARAMS
_lowercase =TEXT_TO_IMAGE_BATCH_PARAMS
_lowercase =TEXT_TO_IMAGE_IMAGE_PARAMS
_lowercase =TEXT_TO_IMAGE_IMAGE_PARAMS
def __a ( self ) -> Optional[int]:
torch.manual_seed(0 )
lowerCAmelCase_ = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
lowerCAmelCase_ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , )
torch.manual_seed(0 )
lowerCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
lowerCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_002 , )
lowerCAmelCase_ = CLIPTextModel(UpperCamelCase_ )
lowerCAmelCase_ = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowerCAmelCase_ = 77
lowerCAmelCase_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __a ( self , _UpperCamelCase , _UpperCamelCase=0 ) -> Optional[int]:
if str(UpperCamelCase_ ).startswith("mps" ):
lowerCAmelCase_ = torch.manual_seed(UpperCamelCase_ )
else:
lowerCAmelCase_ = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
lowerCAmelCase_ = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self ) -> int:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def __a ( self ) -> Any:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def __a ( self ) -> Dict:
lowerCAmelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ = self.get_dummy_components()
torch.manual_seed(0 )
lowerCAmelCase_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCAmelCase_ = RobertaSeriesModelWithTransformation(UpperCamelCase_ )
lowerCAmelCase_ = text_encoder
lowerCAmelCase_ = AltDiffusionPipeline(**UpperCamelCase_ )
lowerCAmelCase_ = alt_pipe.to(UpperCamelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowerCAmelCase_ = self.get_dummy_inputs(UpperCamelCase_ )
lowerCAmelCase_ = 'A photo of an astronaut'
lowerCAmelCase_ = alt_pipe(**UpperCamelCase_ )
lowerCAmelCase_ = output.images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase_ = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __a ( self ) -> List[str]:
lowerCAmelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ = self.get_dummy_components()
lowerCAmelCase_ = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
torch.manual_seed(0 )
lowerCAmelCase_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCAmelCase_ = RobertaSeriesModelWithTransformation(UpperCamelCase_ )
lowerCAmelCase_ = text_encoder
lowerCAmelCase_ = AltDiffusionPipeline(**UpperCamelCase_ )
lowerCAmelCase_ = alt_pipe.to(UpperCamelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowerCAmelCase_ = self.get_dummy_inputs(UpperCamelCase_ )
lowerCAmelCase_ = alt_pipe(**UpperCamelCase_ )
lowerCAmelCase_ = output.images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase_ = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def __a ( self ) -> Union[str, Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self ) -> int:
lowerCAmelCase_ = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , safety_checker=UpperCamelCase_ )
lowerCAmelCase_ = alt_pipe.to(UpperCamelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowerCAmelCase_ = 'A painting of a squirrel eating a burger'
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = alt_pipe([prompt] , generator=UpperCamelCase_ , guidance_scale=6.0 , num_inference_steps=20 , output_type="np" )
lowerCAmelCase_ = output.images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase_ = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __a ( self ) -> int:
lowerCAmelCase_ = DDIMScheduler.from_pretrained("BAAI/AltDiffusion" , subfolder="scheduler" )
lowerCAmelCase_ = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , scheduler=UpperCamelCase_ , safety_checker=UpperCamelCase_ )
lowerCAmelCase_ = alt_pipe.to(UpperCamelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowerCAmelCase_ = 'A painting of a squirrel eating a burger'
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = alt_pipe([prompt] , generator=UpperCamelCase_ , num_inference_steps=2 , output_type="numpy" )
lowerCAmelCase_ = output.images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase_ = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 704 |
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
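# show_min(1, 4) returns 11 (path 1 -> 3 -> 4) and show_min(0, 3) returns 16 (path 0 -> 2 -> 3);
# the calls above discard the return values, so wrap them in print(...) to display the distances.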
| 279 | 0 |
'''simple docstring'''
from math import log2


def lowest_set_bit_index(number: int) -> int:
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(number, float):
        raise TypeError("Input value must be a 'int' type")
    return 0 if (number == 0) else int(log2(number & -number))
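# Example: lowest_set_bit_index(8) == 3, since 8 & -8 == 0b1000, and lowest_set_bit_index(6) == 1,
# because the lowest set bit of 0b110 is at index 1.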
if __name__ == "__main__":
import doctest
doctest.testmod()
| 75 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
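# When torch or transformers >= 4.25.0 is unavailable, the dummy objects imported in the except
# branch above raise an informative error at use time instead of failing at import.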
| 63 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class __magic_name__ :
def __init__( self : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Any=2 , lowerCamelCase__ : str=True , lowerCamelCase__ : List[Any]=False , lowerCamelCase__ : Dict=10 , lowerCamelCase__ : int=3 , lowerCamelCase__ : str=32 * 4 , lowerCamelCase__ : List[str]=32 * 6 , lowerCamelCase__ : str=4 , lowerCamelCase__ : Dict=32 , ) -> str:
'''simple docstring'''
UpperCamelCase__ : List[Any] = parent
UpperCamelCase__ : Union[str, Any] = batch_size
UpperCamelCase__ : Dict = is_training
UpperCamelCase__ : Optional[int] = use_auxiliary_loss
UpperCamelCase__ : Optional[Any] = num_queries
UpperCamelCase__ : Dict = num_channels
UpperCamelCase__ : Tuple = min_size
UpperCamelCase__ : Tuple = max_size
UpperCamelCase__ : List[Any] = num_labels
UpperCamelCase__ : List[Any] = mask_feature_size
def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase__ )
UpperCamelCase__ : Any = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ )
UpperCamelCase__ : str = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5
).float()
UpperCamelCase__ : int = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long()
UpperCamelCase__ : Tuple = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCAmelCase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def UpperCAmelCase__ ( self : Any ) -> int:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
UpperCamelCase__ : Optional[int] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def UpperCAmelCase__ ( self : int , lowerCamelCase__ : int , lowerCamelCase__ : Dict ) -> int:
'''simple docstring'''
UpperCamelCase__ : List[str] = output.encoder_hidden_states
UpperCamelCase__ : str = output.pixel_decoder_hidden_states
UpperCamelCase__ : str = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , config.decoder_config.decoder_layers )
def UpperCAmelCase__ ( self : Dict , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : str=False ) -> Tuple:
'''simple docstring'''
with torch.no_grad():
UpperCamelCase__ : List[Any] = MaskFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ : List[str] = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
UpperCamelCase__ : str = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
# the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
UpperCamelCase__ : List[Any] = MaskFormerForInstanceSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
def comm_check_on_output(lowerCamelCase__ : Union[str, Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCamelCase__ : int = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
UpperCamelCase__ : Any = model(lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
UpperCamelCase__ : Any = model(
pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __magic_name__ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase):
A: Optional[int] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
A: Dict = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
A: Union[str, Any] = False
A: Optional[Any] = False
A: Union[str, Any] = False
A: Optional[Any] = False
def UpperCAmelCase__ ( self : str ) -> Any:
'''simple docstring'''
UpperCamelCase__ : Optional[int] = MaskFormerModelTester(self )
UpperCamelCase__ : str = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def UpperCAmelCase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*lowerCamelCase__ )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def UpperCAmelCase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def UpperCAmelCase__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCAmelCase__ ( self : List[Any] ) -> str:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Dict = model_class(lowerCamelCase__ )
UpperCamelCase__ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : Any = [*signature.parameters.keys()]
UpperCamelCase__ : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
@slow
def UpperCAmelCase__ ( self : Any ) -> Dict:
'''simple docstring'''
for model_name in ["facebook/maskformer-swin-small-coco"]:
UpperCamelCase__ : Tuple = MaskFormerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ : Any = (self.model_tester.min_size,) * 2
UpperCamelCase__ : List[str] = {
'''pixel_values''': torch.randn((2, 3, *size) , device=lowerCamelCase__ ),
'''mask_labels''': torch.randn((2, 10, *size) , device=lowerCamelCase__ ),
'''class_labels''': torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(),
}
UpperCamelCase__ : List[Any] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(lowerCamelCase__ )
UpperCamelCase__ : int = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[Any] = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
UpperCamelCase__ : Any = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def UpperCAmelCase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
UpperCamelCase__ : List[str] = self.all_model_classes[1]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
UpperCamelCase__ : Union[str, Any] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
UpperCamelCase__ : Optional[int] = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss
loss.backward()
def UpperCAmelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
UpperCamelCase__ : Optional[int] = self.all_model_classes[1]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
UpperCamelCase__ : Any = True
UpperCamelCase__ : int = True
UpperCamelCase__ : List[Any] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
UpperCamelCase__ : Union[str, Any] = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
UpperCamelCase__ : Tuple = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCamelCase__ : Dict = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# unlike the original implementation, we set requires_grad=True on these hidden states so their gradients can be retained and checked below
UpperCamelCase__ : str = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCamelCase__ : Dict = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__UpperCamelCase : Optional[int] = 1E-4
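# Absolute tolerance used for all logit- and hidden-state-slice comparisons in the integration tests below.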
def _a ( ):
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class __magic_name__ ( unittest.TestCase):
@cached_property
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : str = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(lowerCamelCase__ )
UpperCamelCase__ : List[str] = self.default_image_processor
UpperCamelCase__ : List[str] = prepare_img()
UpperCamelCase__ : Any = image_processor(lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
UpperCamelCase__ : Union[str, Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 800, 1088) )
with torch.no_grad():
UpperCamelCase__ : Optional[Any] = model(**lowerCamelCase__ )
UpperCamelCase__ : Dict = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
UpperCamelCase__ : Union[str, Any] = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
UpperCamelCase__ : Optional[Any] = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def UpperCAmelCase__ ( self : List[Any] ) -> Any:
'''simple docstring'''
UpperCamelCase__ : Tuple = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(lowerCamelCase__ )
.eval()
)
UpperCamelCase__ : Union[str, Any] = self.default_image_processor
UpperCamelCase__ : Optional[int] = prepare_img()
UpperCamelCase__ : Dict = image_processor(lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
UpperCamelCase__ : int = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 800, 1088) )
with torch.no_grad():
UpperCamelCase__ : Tuple = model(**lowerCamelCase__ )
# masks_queries_logits
UpperCamelCase__ : Optional[int] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCamelCase__ : Union[str, Any] = [
[-1.373_7124, -1.772_4937, -1.936_4233],
[-1.597_7281, -1.986_7939, -2.152_3695],
[-1.579_5398, -1.926_9832, -2.09_3942],
]
UpperCamelCase__ : Any = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
UpperCamelCase__ : List[str] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCamelCase__ : Optional[int] = torch.tensor(
[
[1.6_5_1_2E0_0, -5.2_5_7_2E0_0, -3.3_5_1_9E0_0],
[3.6_1_6_9E-0_2, -5.9_0_2_5E0_0, -2.9_3_1_3E0_0],
[1.0_7_6_6E-0_4, -7.7_6_3_0E0_0, -5.1_2_6_3E0_0],
] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ : int = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(lowerCamelCase__ )
.eval()
)
UpperCamelCase__ : Tuple = self.default_image_processor
UpperCamelCase__ : Any = prepare_img()
UpperCamelCase__ : Optional[int] = image_processor(lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
UpperCamelCase__ : Optional[int] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 800, 1088) )
with torch.no_grad():
UpperCamelCase__ : List[Any] = model(**lowerCamelCase__ )
# masks_queries_logits
UpperCamelCase__ : Dict = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCamelCase__ : Optional[int] = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
UpperCamelCase__ : Any = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
UpperCamelCase__ : str = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCamelCase__ : List[str] = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ : Any = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(lowerCamelCase__ )
.eval()
)
UpperCamelCase__ : Dict = self.default_image_processor
UpperCamelCase__ : Optional[Any] = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] , return_tensors='''pt''' , )
UpperCamelCase__ : int = inputs['''pixel_values'''].to(lowerCamelCase__ )
UpperCamelCase__ : Tuple = [el.to(lowerCamelCase__ ) for el in inputs['''mask_labels''']]
UpperCamelCase__ : Optional[int] = [el.to(lowerCamelCase__ ) for el in inputs['''class_labels''']]
with torch.no_grad():
UpperCamelCase__ : Optional[int] = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
| 106 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCamelCase : Dict = {
"configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : str = ["MobileViTFeatureExtractor"]
__UpperCamelCase : Optional[Any] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = [
"MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileViTForImageClassification",
"MobileViTForSemanticSegmentation",
"MobileViTModel",
"MobileViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = [
"TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileViTForImageClassification",
"TFMobileViTForSemanticSegmentation",
"TFMobileViTModel",
"TFMobileViTPreTrainedModel",
]
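# At runtime this module is replaced by a _LazyModule that resolves the names above on first
# attribute access; the TYPE_CHECKING branch below gives static analyzers concrete imports.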
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
__UpperCamelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 106 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Dict , _snake_case : Dict , _snake_case : List[Any]=2 , _snake_case : str=True , _snake_case : Tuple=False , _snake_case : Any=10 , _snake_case : List[str]=3 , _snake_case : Optional[Any]=32 * 4 , _snake_case : Optional[int]=32 * 6 , _snake_case : int=4 , _snake_case : Any=32 , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = is_training
A__ = use_auxiliary_loss
A__ = num_queries
A__ = num_channels
A__ = min_size
A__ = max_size
A__ = num_labels
A__ = mask_feature_size
def _a ( self : int ):
"""simple docstring"""
A__ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__snake_case )
A__ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__snake_case )
A__ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__snake_case ) > 0.5
).float()
A__ = (torch.rand((self.batch_size, self.num_labels) , device=__snake_case ) > 0.5).long()
A__ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def _a ( self : Any ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def _a ( self : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : List[str] ):
"""simple docstring"""
A__ = output.encoder_hidden_states
A__ = output.pixel_decoder_hidden_states
A__ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__snake_case ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__snake_case ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__snake_case ) , config.decoder_config.decoder_layers )
def _a ( self : Any , _snake_case : Tuple , _snake_case : Dict , _snake_case : Dict , _snake_case : List[Any]=False ):
"""simple docstring"""
with torch.no_grad():
A__ = MaskFormerModel(config=__snake_case )
model.to(__snake_case )
model.eval()
A__ = model(pixel_values=__snake_case , pixel_mask=__snake_case )
A__ = model(__snake_case , output_hidden_states=__snake_case )
# the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__snake_case , __snake_case )
def _a ( self : Any , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : Any , _snake_case : str ):
"""simple docstring"""
A__ = MaskFormerForInstanceSegmentation(config=__snake_case )
model.to(__snake_case )
model.eval()
def comm_check_on_output(_snake_case : Tuple ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
A__ = model(pixel_values=__snake_case , pixel_mask=__snake_case )
A__ = model(__snake_case )
comm_check_on_output(__snake_case )
A__ = model(
pixel_values=__snake_case , pixel_mask=__snake_case , mask_labels=__snake_case , class_labels=__snake_case )
comm_check_on_output(__snake_case )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
A__ : Dict = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
A__ : Optional[int] = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
A__ : Optional[Any] = False
A__ : int = False
A__ : Any = False
A__ : List[str] = False
    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
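# Note (added): the integration tests above are decorated with @slow and
# @require_vision, so they are skipped by default; in the transformers test
# suite they run only when RUN_SLOW=1 is set and Pillow is installed.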
| 9 |
def validate_initial_digits(credit_card_number: str) -> bool:
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
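    # Worked example of the doubling rule (added for illustration): in "59",
    # the digit 5 doubles to 10; since 10 > 9, the branch computes
    # 10 % 10 + 1 == 1, the digit sum of 10. The checksum is 1 + 9 = 10, so
    # "59" passes the Luhn check itself, though it still fails the length
    # check in validate_credit_card_number.
    validate_credit_card_number("59")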
| 166 | 0 |
import numpy as np


class Cell:
    """
    Represents a cell in the grid world:
    position: tuple of (x, y) coordinates, initially (0, 0)
    parent: the cell visited before arriving at this cell
    g, h, f: values used by the heuristic search
    """

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neighbours(self, cell):
        neighbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
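    # Note (added): n.h stores the *squared* Euclidean distance to the goal
    # while every move costs a flat 1 (diagonals included), so the heuristic
    # is not admissible and the search behaves like greedy best-first rather
    # than strictly optimal A*.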
| 288 |
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
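    # Mapping note (added): get_x/get_y scale by the src/dst ratio and
    # truncate, so upscaling a 2x2 image to 4x4 repeats each source pixel in
    # a 2x2 block: int(0.5 * x) maps output columns 0, 1, 2, 3 to source
    # columns 0, 0, 1, 1.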
| 288 | 1 |
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
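    # Quick demonstration (added for illustration, not from the source): each
    # recursive pass bubbles the current maximum to the end of the unsorted
    # prefix, so at most len(data) - 1 passes are needed.
    print(bubble_sort([5, 1, 4, 2, 8]))  # -> [1, 2, 4, 5, 8]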
| 140 |
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
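# Example invocation (added; the script filename is illustrative and the timm
# checkpoint must be downloadable):
#   python convert_vit_timm_to_pytorch.py --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224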
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_patch16_224",
        type=str,
        help="Name of the ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 86 | 0 |
from __future__ import annotations

import unittest

from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import numpy
    import tensorflow as tf

    from transformers.models.esm.modeling_tf_esm import (
        TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFEsmForMaskedLM,
        TFEsmForSequenceClassification,
        TFEsmForTokenClassification,
        TFEsmModel,
    )


class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True

        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None


@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
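# Note (added): the integration checks above compare only a 3x3 logits slice
# against hard-coded reference values, with atol=1e-2 for the LM head and
# atol=1e-4 for the base model, to absorb minor backend nondeterminism.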
| 717 |
import argparse
import collections
import json
import os
import re
import string
import sys

import numpy as np


ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
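# Example invocation (added; file names are illustrative):
#   python evaluate_squad_v2.py dev-v2.0.json predictions.json \
#       --na-prob-file na_probs.json -o eval.json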
if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
| 478 | 0 |
from __future__ import annotations


def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]


if __name__ == "__main__":
    print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
    print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
    print(
        all_construct(
            "hexagonosaurus",
            ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
        )
    )
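    # One more illustrative call (added, not from the source): the classic
    # "purple" example, which prints [['purp', 'le'], ['p', 'ur', 'p', 'le']].
    print(all_construct("purple", ["purp", "p", "ur", "le", "purpl"]))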
| 487 |
def find_min(arr: list) -> int:
    n = len(arr)
    s = sum(arr)

    dp = [[False for x in range(s + 1)] for y in range(n + 1)]

    for i in range(1, n + 1):
        dp[i][0] = True

    for i in range(1, s + 1):
        dp[0][i] = False

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i][j - 1]

            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
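

if __name__ == "__main__":
    # Illustrative check (added, not from the source): [1, 6, 11, 5] can be
    # split into {1, 5, 6} and {11}, so the minimum partition difference is 1.
    print(find_min([1, 6, 11, 5]))  # 1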
| 487 | 1 |
from __future__ import annotations

import numpy as np


def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    # Ensure that the table is a square array
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total

    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
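    # Small demonstration (added for illustration): a Doolittle factorization
    # of a 3x3 matrix; multiplying the factors reproduces the input.
    matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(matrix)
    assert np.allclose(lower @ upper, matrix)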
| 702 |
from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f"Sorry, there are no results for ISBN: {isbn}.")
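# Note (added): both helpers call the live https://openlibrary.org API, so the
# interactive loop above needs network access; only JSONDecodeError is caught,
# and other requests exceptions will propagate.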
| 231 | 0 |