| column | type | range |
|---|---|---|
| code | string | lengths 86–54.5k |
| code_codestyle | int64 | 0–371 |
| style_context | string | lengths 87–49.2k |
| style_context_codestyle | int64 | 0–349 |
| label | int64 | 0–1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """Writes a basic Accelerate config file at `save_location`, detecting the available hardware."""
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
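A minimal usage sketch (added; not part of the file above). In current accelerate releases, `write_basic_config` is also re-exported from `accelerate.utils`, which is how the transformers examples invoke it:

```python
# Sketch: generate a default single-machine Accelerate config with fp16.
from accelerate.utils import write_basic_config

config_path = write_basic_config(mixed_precision="fp16")
print(f"Config written to {config_path}")  # returns False if a config already exists
```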
---
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_states = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_states)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
---
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging

logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
---
"""simple docstring"""
def solution(n: int = 100) -> int:
    """
    Returns the difference between the square of the sum and the sum of the
    squares of the first ``n`` natural numbers (Project Euler problem 6).
    """
    sum_cubes = (n * (n + 1) // 2) ** 2  # (1 + ... + n)**2, which also equals the sum of the first n cubes
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares


if __name__ == "__main__":
    print(f"{solution() = }")
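A quick sanity check (added; not in the original snippet): for n = 10 the square of the sum is 55² = 3025 and the sum of the squares is 385, giving 2640.

```python
assert solution(10) == 3025 - 385 == 2640
assert solution(100) == 25164150  # the known Project Euler #6 answer
```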
---
def add(first: int, second: int) -> int:
    """Addition using only the bitwise operators & (AND), ^ (XOR) and << (left shift)."""
    while second != 0:
        c = first & second  # positions where a carry is generated
        first ^= second     # bitwise sum, ignoring carries
        second = c << 1     # carries, shifted into the next bit position
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
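A short trace (added for illustration) of how the carry propagates in `add(5, 9)`:

```python
# first = 0b0101 (5), second = 0b1001 (9)
# iter 1: c = 0b0001, first = 0b1100 (12), second = 0b0010 (2)
# iter 2: c = 0b0000, first = 0b1110 (14), second = 0b0000 -> loop ends
assert add(5, 9) == 14
```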
---
import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    """
    Adapter that only logs on the main process by default. Pass `main_process_only=False`
    to log on all processes, or `in_order=True` to log on each process in rank order.
    """

    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
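A minimal usage sketch (added); `get_logger` is the public entry point in `accelerate.logging`, and `Accelerator()` initializes the `PartialState` the adapter requires:

```python
from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()
logger = get_logger(__name__, log_level="INFO")
logger.info("printed once, on the main process only")
logger.info("printed on every process", main_process_only=False)
```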
---
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING

logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}


class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: Blip2VisionConfig,
        qformer_config: Blip2QFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
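For context, a hedged sketch of composing these configs via the `from_vision_qformer_text_configs` helper defined above; the `OPTConfig` text backbone is just one illustrative choice:

```python
from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig

vision_config = Blip2VisionConfig()
qformer_config = Blip2QFormerConfig()
text_config = OPTConfig()

config = Blip2Config.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)
print(config.num_query_tokens)  # 32 by default
```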
---
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
_DESCRIPTION = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework, the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
_KWARGS_DESCRIPTION = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
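One detail worth noting in `_compute` above: the comprehension turns a dict of parallel lists into the list of per-example dicts that the COMET scorer expects. A standalone illustration (added):

```python
data = {"src": ["s1", "s2"], "mt": ["m1", "m2"], "ref": ["r1", "r2"]}
rows = [dict(zip(data, t)) for t in zip(*data.values())]
assert rows == [
    {"src": "s1", "mt": "m1", "ref": "r1"},
    {"src": "s2", "mt": "m2", "ref": "r2"},
]
```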
---
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Given any two of resistance, reactance, and impedance (the third passed as 0), compute the missing value."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
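A quick usage check (added): with a 3 Ω resistance and 4 Ω reactance, the impedance of the circuit is 5 Ω (a 3-4-5 triangle).

```python
assert electrical_impedance(3, 4, 0) == {"impedance": 5.0}
assert electrical_impedance(0, 4, 5) == {"resistance": 3.0}
```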
---
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class KarrasVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
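A minimal usage sketch (added). The checkpoint name is an assumption for illustration, following the NCSN++ checkpoints used with VE pipelines in the diffusers docs:

```python
from diffusers import KarrasVePipeline

pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")  # illustrative checkpoint
image = pipe(num_inference_steps=50).images[0]
image.save("karras_ve_sample.png")
```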
---
import string


def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the decryption under every possible key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
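For example (added), decrypting a message enciphered with a shift of 3; key #3 recovers the plaintext among the 26 candidates printed:

```python
decrypt("WKLV LV D PHVVDJH")
# ...
# Decryption using Key #3: THIS IS A MESSAGE
# ...
```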
---
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
---
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Union[str, bool] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
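A short sketch (added) of instantiating this config for a monthly dataset with a 24-step horizon; the field values are illustrative, not prescribed:

```python
from transformers import TimeSeriesTransformerConfig

config = TimeSeriesTransformerConfig(
    prediction_length=24,          # forecast horizon
    context_length=48,             # conditioning window
    num_time_features=2,           # e.g. age + month-of-year
    lags_sequence=[1, 2, 3, 4, 5, 6, 7, 12],
    d_model=64,
)
print(config.feature_size)  # input_size * len(lags_sequence) + config._number_of_features
```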
---
from typing import List, Optional, Tuple, Union

import torch

from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
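A minimal usage sketch (added); `harmonai/maestro-150k` is the checkpoint used in the diffusers docs for this audio pipeline:

```python
from diffusers import DiffusionPipeline
from scipy.io import wavfile

pipe = DiffusionPipeline.from_pretrained("harmonai/maestro-150k")
audio = pipe(audio_length_in_s=4.0).audios[0]  # shape: (channels, samples)
wavfile.write("dance.wav", pipe.unet.config.sample_rate, audio.transpose())
```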
---
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir

sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))

from test_module.custom_configuration import CustomConfig  # noqa E402

SAMPLE_ROBERTA_CONFIG = get_tests_dir('fixtures/dummy-config.json')


class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)
            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
---
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
---
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """Validates a Spanish national ID (DNI): 8 digits followed by a checksum letter."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
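A quick check (added): 12345678 mod 23 is 14, and the letter at index 14 of the lookup table is "Z", so "12345678Z" validates:

```python
assert is_spain_national_id("12345678Z")      # 12345678 % 23 == 14 -> "Z"
assert is_spain_national_id("12345678-Z")     # dashes are stripped before validation
assert not is_spain_national_id("12345678T")  # wrong checksum letter
```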
---
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"


class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
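A short usage sketch (added), loading the pretrained v2 checkpoint:

```python
from transformers import AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
print(tokenizer.tokenize("Hello, world!"))  # sentencepiece pieces, e.g. ['▁hello', ',', '▁world', '!']
enc = tokenizer("Hello, world!")
print(enc["input_ids"])  # ids including [CLS] ... [SEP]
```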
---
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    """
    Evaluate a postfix (reverse Polish) expression over integers.

    >>> evaluate_postfix(["2", "1", "+", "3", "*"])
    9
    >>> evaluate_postfix(["4", "13", "5", "/", "+"])
    6
    >>> evaluate_postfix([])
    0
    """
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division truncating toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
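A step-by-step trace (added) of the stack for `["2", "1", "+", "3", "*"]`:

```python
# token "2" -> stack [2]
# token "1" -> stack [2, 1]
# token "+" -> pop 1 and 2, push 3 -> stack [3]
# token "3" -> stack [3, 3]
# token "*" -> pop 3 and 3, push 9 -> stack [9]
assert evaluate_postfix(["2", "1", "+", "3", "*"]) == 9
```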
---
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ['''BlenderbotTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
---
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    """
    Return the relative distance (step / max_step) after which the complex number
    given by this x-y pair diverges. Members of the Mandelbrot set do not diverge,
    so their distance is 1.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Members of the Mandelbrot set are colored black, all other points white."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Use the relative distance as hue; members of the Mandelbrot set stay black."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """Render the Mandelbrot set as a PIL image via per-pixel escape-time iteration."""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x=-0.6, figure_center_y=-0.4,
    #                 figure_width=0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding=False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
---
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowerCamelCase( _a, unittest.TestCase ):
# TODO: is there an appropriate internal test set?
lowercase_ : int = """ssube/stable-diffusion-x4-upscaler-onnx"""
def UpperCamelCase ( self, lowerCamelCase=0) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Dict = floats_tensor((1, 3, 1_28, 1_28), rng=random.Random(lowerCamelCase))
_lowercase : Union[str, Any] = torch.manual_seed(lowerCamelCase)
_lowercase : Optional[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Dict = self.get_dummy_inputs()
_lowercase : Optional[int] = pipe(**lowerCamelCase).images
_lowercase : Optional[int] = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Union[str, Any] = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3])
assert np.abs(image_slice - expected_slice).max() < 1E-1
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : str = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[str] = self.get_dummy_inputs()
_lowercase : List[Any] = pipe(**lowerCamelCase).images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : int = np.array(
[0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Optional[int] = self.get_dummy_inputs()
_lowercase : Union[str, Any] = pipe(**lowerCamelCase).images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Optional[int] = np.array(
[0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : List[str] = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Dict = self.get_dummy_inputs()
_lowercase : Optional[Any] = pipe(**lowerCamelCase).images
_lowercase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Union[str, Any] = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Any = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Any = self.get_dummy_inputs()
_lowercase : List[str] = pipe(**lowerCamelCase).images
_lowercase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Union[str, Any] = np.array(
[0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCamelCase( unittest.TestCase ):
@property
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Union[str, Any] = ort.SessionOptions()
_lowercase : str = False
return options
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
_lowercase : int = init_image.resize((1_28, 1_28))
# using the PNDM scheduler by default
_lowercase : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx', provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Optional[int] = 'A fantasy landscape, trending on artstation'
_lowercase : List[Any] = torch.manual_seed(0)
_lowercase : str = pipe(
prompt=lowerCamelCase, image=lowerCamelCase, guidance_scale=7.5, num_inference_steps=10, generator=lowerCamelCase, output_type='np', )
_lowercase : List[Any] = output.images
_lowercase : List[Any] = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 5_12, 3)
_lowercase : List[Any] = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg')
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            'ssube/stable-diffusion-x4-upscaler-onnx', subfolder='scheduler')
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            'ssube/stable-diffusion-x4-upscaler-onnx', scheduler=lms_scheduler, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = 'A fantasy landscape, trending on artstation'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type='np', )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
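
# Illustrative helper (an assumption, not part of the original test suite):
# the slice-comparison pattern used by the assertions above, standalone.
def _assert_slice_close(image, expected_slice, atol=1e-1):
    # Compare the bottom-right 3x3 patch of the last channel to a reference.
    image_slice = image[0, -3:, -3:, -1]
    assert np.abs(image_slice.flatten() - expected_slice).max() < atol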
| 21 | 1 |
from scipy.stats import pearsonr
import datasets
UpperCamelCase__ = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
UpperCamelCase__ = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
UpperCamelCase__ = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"] , )
    def _compute( self , predictions , references , return_pvalue=False ):
        """simple docstring"""
        if return_pvalue:
            results = pearsonr(references , predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions )[0] )}
| 102 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class a__ ( nn.Module ):
    def __init__( self , dim , num_attention_heads , attention_head_dim , dropout=0.0 , cross_attention_dim = None , activation_fn = "geglu" , num_embeds_ada_norm = None , attention_bias = False , only_cross_attention = False , double_self_attention = False , upcast_attention = False , norm_elementwise_affine = True , norm_type = "layer_norm" , final_dropout = False , ):
        """simple docstring"""
        super().__init__()
        self.only_cross_attention = only_cross_attention
        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
                f""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" )
        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim , num_embeds_ada_norm )
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim , num_embeds_ada_norm )
        else:
            self.norm1 = nn.LayerNorm(dim , elementwise_affine=norm_elementwise_affine )
        self.attn1 = Attention(
            query_dim=dim , heads=num_attention_heads , dim_head=attention_head_dim , dropout=dropout , bias=attention_bias , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=upcast_attention , )
        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim , num_embeds_ada_norm )
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim , elementwise_affine=norm_elementwise_affine )
            )
            self.attn2 = Attention(
                query_dim=dim , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=num_attention_heads , dim_head=attention_head_dim , dropout=dropout , bias=attention_bias , upcast_attention=upcast_attention , ) # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None
        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim , elementwise_affine=norm_elementwise_affine )
        self.ff = FeedForward(dim , dropout=dropout , activation_fn=activation_fn , final_dropout=final_dropout )
        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0
    def set_chunk_feed_forward( self , chunk_size , dim ):
        """simple docstring"""
        self._chunk_size = chunk_size
        self._chunk_dim = dim
    def forward( self , hidden_states , attention_mask = None , encoder_hidden_states = None , encoder_attention_mask = None , timestep = None , cross_attention_kwargs = None , class_labels = None , ):
        """simple docstring"""
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states , timestep )
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states , gate_msa , shift_mlp , scale_mlp , gate_mlp = self.norm1(
                hidden_states , timestep , class_labels , hidden_dtype=hidden_states.dtype )
        else:
            norm_hidden_states = self.norm1(hidden_states )
        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=attention_mask , **cross_attention_kwargs , )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1 ) * attn_output
        hidden_states = attn_output + hidden_states
        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states , timestep ) if self.use_ada_layer_norm else self.norm2(hidden_states )
            )
            attn_output = self.attn2(
                norm_hidden_states , encoder_hidden_states=encoder_hidden_states , attention_mask=encoder_attention_mask , **cross_attention_kwargs , )
            hidden_states = attn_output + hidden_states
        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states )
        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" )
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice ) for hid_slice in norm_hidden_states.chunk(num_chunks , dim=self._chunk_dim )] , dim=self._chunk_dim , )
        else:
            ff_output = self.ff(norm_hidden_states )
        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1 ) * ff_output
        hidden_states = ff_output + hidden_states
        return hidden_states
class a__ ( nn.Module ):
    def __init__( self , dim , dim_out = None , mult = 4 , dropout = 0.0 , activation_fn = "geglu" , final_dropout = False , ):
        """simple docstring"""
        super().__init__()
        inner_dim = int(dim * mult )
        dim_out = dim_out if dim_out is not None else dim
        if activation_fn == "gelu":
            act_fn = GELU(dim , inner_dim )
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim , inner_dim , approximate="tanh" )
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim , inner_dim )
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim , inner_dim )
        self.net = nn.ModuleList([] )
        # project in
        self.net.append(act_fn )
        # project dropout
        self.net.append(nn.Dropout(dropout ) )
        # project out
        self.net.append(nn.Linear(inner_dim , dim_out ) )
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout ) )
    def forward( self , hidden_states ):
        """simple docstring"""
        for module in self.net:
            hidden_states = module(hidden_states )
        return hidden_states
class a__ ( nn.Module ):
def __init__( self , _A , _A , _A = "none" ):
"""simple docstring"""
super().__init__()
__lowerCAmelCase = nn.Linear(_A , _A )
__lowerCAmelCase = approximate
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
if gate.device.type != "mps":
return F.gelu(_A , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
__lowerCAmelCase = self.proj(_A )
__lowerCAmelCase = self.gelu(_A )
return hidden_states
class a__ ( nn.Module ):
    def __init__( self , dim_in , dim_out ):
        """simple docstring"""
        super().__init__()
        self.proj = nn.Linear(dim_in , dim_out * 2 )
    def gelu( self , gate ):
        """simple docstring"""
        if gate.device.type != "mps":
            return F.gelu(gate )
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) ).to(dtype=gate.dtype )
    def forward( self , hidden_states ):
        """simple docstring"""
        hidden_states , gate = self.proj(hidden_states ).chunk(2 , dim=-1 )
        return hidden_states * self.gelu(gate )
class a__ ( nn.Module ):
    def __init__( self , dim_in , dim_out ):
        """simple docstring"""
        super().__init__()
        self.proj = nn.Linear(dim_in , dim_out )
    def forward( self , x ):
        """simple docstring"""
        x = self.proj(x )
        return x * torch.sigmoid(1.702 * x )
class a__ ( nn.Module ):
    def __init__( self , embedding_dim , num_embeddings ):
        """simple docstring"""
        super().__init__()
        self.emb = nn.Embedding(num_embeddings , embedding_dim )
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim , embedding_dim * 2 )
        self.norm = nn.LayerNorm(embedding_dim , elementwise_affine=False )
    def forward( self , x , timestep ):
        """simple docstring"""
        emb = self.linear(self.silu(self.emb(timestep ) ) )
        scale , shift = torch.chunk(emb , 2 )
        x = self.norm(x ) * (1 + scale) + shift
        return x
class a__ ( nn.Module ):
    def __init__( self , embedding_dim , num_embeddings ):
        """simple docstring"""
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings , embedding_dim )
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim , 6 * embedding_dim , bias=True )
        self.norm = nn.LayerNorm(embedding_dim , elementwise_affine=False , eps=1e-6 )
    def forward( self , x , timestep , class_labels , hidden_dtype=None ):
        """simple docstring"""
        emb = self.linear(self.silu(self.emb(timestep , class_labels , hidden_dtype=hidden_dtype ) ) )
        shift_msa , scale_msa , gate_msa , shift_mlp , scale_mlp , gate_mlp = emb.chunk(6 , dim=1 )
        x = self.norm(x ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class a__ ( nn.Module ):
    def __init__( self , embedding_dim , out_dim , num_groups , act_fn = None , eps = 1e-5 ):
        """simple docstring"""
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn )
        self.linear = nn.Linear(embedding_dim , out_dim * 2 )
    def forward( self , x , emb ):
        """simple docstring"""
        if self.act:
            emb = self.act(emb )
        emb = self.linear(emb )
        emb = emb[:, :, None, None]
        scale , shift = emb.chunk(2 , dim=1 )
        x = F.group_norm(x , self.num_groups , eps=self.eps )
        x = x * (1 + scale) + shift
        return x
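
# Standalone sketch of the chunked feed-forward trick used in the forward pass
# above: splitting the chunk dimension trades peak memory for extra kernel
# launches. Names and sizes below are placeholders, not part of the diffusers API.
def _chunked_ff_sketch(ff, x, chunk_size, dim=1):
    if x.shape[dim] % chunk_size != 0:
        raise ValueError(f"dim {x.shape[dim]} must be divisible by chunk_size {chunk_size}")
    num_chunks = x.shape[dim] // chunk_size
    # Run the feed-forward on each slice and stitch the results back together.
    return torch.cat([ff(chunk) for chunk in x.chunk(num_chunks, dim=dim)], dim=dim)


if __name__ == "__main__":
    _ff = nn.Sequential(nn.Linear(64, 256), nn.GELU(), nn.Linear(256, 64))
    assert _chunked_ff_sketch(_ff, torch.randn(2, 128, 64), chunk_size=32).shape == (2, 128, 64)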
| 102 | 1 |
from __future__ import annotations
def extended_euclid( a , b ):
    """simple docstring"""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b , a % b )
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem( n1 , r1 , n2 , r2 ):
    """simple docstring"""
    (x, y) = extended_euclid(n1 , n2 )
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
def invert_modulo( a , n ):
    """simple docstring"""
    (b, x) = extended_euclid(a , n )
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2( n1 , r1 , n2 , r2 ):
    """simple docstring"""
    x, y = invert_modulo(n1 , n2 ), invert_modulo(n2 , n1 )
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="""chinese_remainder_theorem""", verbose=True)
testmod(name="""chinese_remainder_theorem2""", verbose=True)
testmod(name="""invert_modulo""", verbose=True)
testmod(name="""extended_euclid""", verbose=True)
| 303 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 303 | 1 |
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION ='\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
_DESCRIPTION ='\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
_KWARGS_DESCRIPTION ='\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthews Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy( preds , labels ):
    '''simple docstring'''
    return float((preds == labels).mean() )
def acc_and_f1( preds , labels ):
    '''simple docstring'''
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def pearson_and_spearman( preds , labels ):
    '''simple docstring'''
    pearson_corr = float(pearsonr(preds , labels )[0] )
    spearman_corr = float(spearmanr(preds , labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
    def _info( self) -> Any:
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]')
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32'),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32'),
}) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
    def _compute( self , predictions , references) -> Any:
        '''simple docstring'''
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references , predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions , references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions , references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions , references)}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]')
| 242 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings( ModelMixin , ConfigMixin ):
@register_to_config
    def __init__( self , learnable , hidden_size = None , length = None) -> Union[str, Any]:
        '''simple docstring'''
        super().__init__()
        self.learnable = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length , hidden_size)
        else:
            embeddings = None
        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline( DiffusionPipeline ):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler
    def __init__( self , vqvae , text_encoder , tokenizer , transformer , scheduler , learned_classifier_free_sampling_embeddings , ) -> Tuple:
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            vqvae=vqvae , transformer=transformer , text_encoder=text_encoder , tokenizer=tokenizer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
    def _encode_prompt( self , prompt , num_images_per_prompt , do_classifier_free_guidance) -> List[str]:
        '''simple docstring'''
        batch_size = len(prompt) if isinstance(prompt , list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f" {self.tokenizer.model_max_length} tokens: {removed_text}")
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=True)
        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt , dim=0)
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size , 1 , 1)
            else:
                uncond_tokens = [''] * batch_size
                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens , padding='max_length' , max_length=max_length , truncation=True , return_tensors='pt' , )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=True)
                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1 , num_images_per_prompt , 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt , seq_len , -1)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
        return prompt_embeds
@torch.no_grad()
    def __call__( self , prompt , num_inference_steps = 100 , guidance_scale = 5.0 , truncation_rate = 1.0 , num_images_per_prompt = 1 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
        '''simple docstring'''
        if isinstance(prompt , str):
            batch_size = 1
        elif isinstance(prompt , list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds = self._encode_prompt(prompt , num_images_per_prompt , do_classifier_free_guidance)
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}.")
        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape , mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    'Unexpected latents value(s). All latents must be valid embedding indices i.e. in the range 0,'
                    f" {self.transformer.num_vector_embeds - 1} (inclusive).")
            latents = latents.to(self.device)
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps , device=self.device)
        timesteps_tensor = self.scheduler.timesteps.to(self.device)
        sample = latents
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input , encoder_hidden_states=prompt_embeds , timestep=t).sample
            if do_classifier_free_guidance:
                model_output_uncond , model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output , dim=1 , keepdim=True)
            model_output = self.truncate(model_output , truncation_rate)
            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)
            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output , timestep=t , sample=sample , generator=generator).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , t , sample)
        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample , shape=embeddings_shape)
        image = self.vqvae.decode(embeddings , force_not_quantize=True).sample
        image = (image / 2 + 0.5).clamp(0 , 1)
        image = image.cpu().permute(0 , 2 , 3 , 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
    def truncate( self , log_p_x_0 , truncation_rate) -> torch.FloatTensor:
        '''simple docstring'''
        sorted_log_p_x_0 , indices = torch.sort(log_p_x_0 , 1 , descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :] , True)
        keep_mask = torch.cat((all_true, keep_mask) , dim=1)
        keep_mask = keep_mask[:, :-1, :]
        keep_mask = keep_mask.gather(1 , indices.argsort(1))
        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf # -inf = log(0)
        return rv
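
# Numeric sketch of the truncation step above (illustrative shapes/values):
# keep the highest-probability classes until their cumulative mass reaches the
# truncation rate, then zero out (log-space -inf) everything else.
if __name__ == "__main__":
    _log_p = torch.log(torch.tensor([[[0.5], [0.3], [0.15], [0.05]]]))  # (batch, classes, pixels)
    _sorted_p, _indices = torch.sort(_log_p.exp(), 1, descending=True)
    _keep = _sorted_p.cumsum(dim=1) < 0.7
    _keep = torch.cat((torch.full_like(_keep[:, :1, :], True), _keep), dim=1)[:, :-1, :]
    _keep = _keep.gather(1, _indices.argsort(1))
    print(_keep.squeeze())  # tensor([ True,  True, False, False])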
| 242 | 1 |
from __future__ import annotations
def rec_insertion_sort(collection: list , n: int ):
    # Checks if the entire collection has been sorted
    if len(collection ) <= 1 or n <= 1:
        return
    insert_next(collection , n - 1 )
    rec_insertion_sort(collection , n - 1 )
def insert_next(collection: list , index: int ):
    # Checks order between adjacent elements
    if index >= len(collection ) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection , index + 1 )
if __name__ == "__main__":
    numbers = input('''Enter integers separated by spaces: ''')
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
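
    # Small self-check (illustrative); note that Python's default recursion
    # limit (~1000 frames) bounds the input size this recursive variant handles.
    demo = [5, 3, 1, 4, 2]
    rec_insertion_sort(demo, len(demo))
    assert demo == [1, 2, 3, 4, 5]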
| 8 |
import math
def res(x , y ):
    if 0 not in (x, y):
        # Compare magnitudes via logarithms: log10(x^y) = y * log10(x).
        return y * math.log10(x )
    else:
        if x == 0: # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1 # any number raised to 0 is 1
    raise AssertionError('''This should never happen''' )
if __name__ == "__main__": # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = '''Enter the base and the power separated by a comma: '''
    x1 , y1 = map(int, input(prompt).split(''','''))
    x2 , y2 = map(int, input(prompt).split(''','''))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)
    # We check for the largest number
    if res1 > res2:
        print('''Largest number is''', x1, '''^''', y1)
    elif res2 > res1:
        print('''Largest number is''', x2, '''^''', y2)
    else:
        print('''Both are equal''')
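
    # Worked check of the idea (illustrative): 10*log10(2) ~ 3.01 beats
    # 6*log10(3) ~ 2.86, so 2^10 = 1024 exceeds 3^6 = 729.
    assert res(2, 10) > res(3, 6)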
| 8 | 1 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()
DEVICE_MAPPING: Optional[dict] = None
class __A ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
"""simple docstring"""
    def __init__( self , features=None , device=None , **jnp_array_kwargs ) -> Any:
        super().__init__(features=features )
        import jax
        from jaxlib.xla_client import Device
        if isinstance(device , Device ):
            raise ValueError(
                f'''Expected {device} to be a `str` not {type(device )}, as `jaxlib.xla_extension.Device` '''
                '''is not serializable neither with `pickle` nor with `dill`. Instead you can surround '''
                '''the device with `str()` to get its string identifier that will be internally mapped '''
                '''to the actual `jaxlib.xla_extension.Device`.''' )
        self.device = device if isinstance(device , str ) else str(jax.devices()[0] )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys() ):
            logger.warning(
                f'''Device with string identifier {self.device} not listed among the available '''
                f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
                f'''device: {str(jax.devices()[0] )}.''' )
            self.device = str(jax.devices()[0] )
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax
        return {str(device ): device for device in jax.devices()}
    def _consolidate( self , column ) -> Optional[Any]:
        import jax
        import jax.numpy as jnp
        if isinstance(column , list ) and column:
            if all(
                isinstance(x , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
                return jnp.stack(column , axis=0 )
        return column
    def _tensorize( self , value ) -> Optional[int]:
        import jax
        import jax.numpy as jnp
        if isinstance(value , (str, bytes, type(None)) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {'''dtype''': jnp.int64}
            else:
                default_dtype = {'''dtype''': jnp.int32}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {'''dtype''': jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device] ):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value , **{**default_dtype, **self.jnp_array_kwargs} )
    def _recursive_tensorize( self , data_struct ) -> int:
        import jax
        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch
            if isinstance(data_struct , torch.Tensor ):
                return self._tensorize(data_struct.detach().cpu().numpy()[()] )
        if hasattr(data_struct , '''__array__''' ) and not isinstance(data_struct , jax.Array ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray ):
            if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )
    def recursive_tensorize( self , data_struct ) -> Union[str, Any]:
        return map_nested(self._recursive_tensorize , data_struct , map_list=False )
    def format_row( self , pa_table ) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )
    def format_column( self , pa_table ) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column , pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column
    def format_batch( self , pa_table ) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
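
# Minimal sketch of the integer-dtype rule implemented in `_tensorize` above:
# integers follow JAX's x64 flag, floats default to float32 (illustrative).
if __name__ == "__main__":
    import jax
    import jax.numpy as jnp

    _dtype = jnp.int64 if jax.config.jax_enable_x64 else jnp.int32
    print(jnp.array(np.arange(3), dtype=_dtype).dtype)  # int32 unless x64 is enabled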
| 215 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=10 , hop_length=160 , chunk_length=8 , padding_value=0.0 , sampling_rate=4000 , return_attention_mask=False , do_normalize=True , ) -> Optional[Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict( self ) -> str:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ) -> str:
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase ):
    """simple docstring"""
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp( self ) -> Dict:
        self.feat_extract_tester = WhisperFeatureExtractionTester(self )
    def test_feat_extract_from_and_save_pretrained( self ) -> Union[str, Any]:
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1 , mel_2 ) )
        self.assertEqual(dict_first , dict_second )
    def test_feat_extract_to_json_file( self ) -> str:
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , '''feat_extract.json''' )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1 , mel_2 ) )
        self.assertEqual(dict_first , dict_second )
    def test_call( self ) -> Optional[int]:
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs , padding='''max_length''' , return_tensors='''np''' ).input_features
        self.assertTrue(input_features.ndim == 3 )
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1e-3 ) )
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors='''np''' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors='''np''' ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors='''np''' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors='''np''' ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
        # Test truncation required
        speech_inputs = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input ) for speech_input in speech_inputs_truncated]
        encoded_sequences_1 = feature_extractor(np_speech_inputs , return_tensors='''np''' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated , return_tensors='''np''' ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
    def test_double_precision_pad( self ) -> Tuple:
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 , 32 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
    def _load_datasamples( self , num_samples ) -> Dict:
        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('''id''' ).select(range(num_samples ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
    def test_integration( self ) -> Any:
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ] )
        # fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech , return_tensors='''pt''' ).input_features
        self.assertEqual(input_features.shape , (1, 80, 3000) )
        self.assertTrue(torch.allclose(input_features[0, 0, :30] , EXPECTED_INPUT_FEATURES , atol=1e-4 ) )
    def test_zero_mean_unit_variance_normalization( self ) -> Union[str, Any]:
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        audio = self._load_datasamples(1 )[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=None )[0]
        self.assertTrue(np.all(np.mean(audio ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(audio ) - 1 ) < 1e-3 ) )
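
# Standalone sketch of the zero-mean/unit-variance normalization exercised by
# the last test; the epsilon guard is an assumption for illustration.
def _zero_mean_unit_var_sketch(x, eps=1e-7):
    return (x - x.mean()) / np.sqrt(x.var() + eps)


if __name__ == "__main__":
    _audio = np.random.rand(16000) * 65535  # badly scaled input, as in the test
    _normed = _zero_mean_unit_var_sketch(_audio)
    assert abs(_normed.mean()) < 1e-3 and abs(_normed.var() - 1) < 1e-3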
| 215 | 1 |
'''simple docstring'''
import math
def fx(x: float , a: float ):
    return math.pow(x , 2 ) - a
def fx_derivative(x: float ):
    return 2 * x
def get_initial_point(a: float ):
    start = 2.0
    while start <= a:
        start = math.pow(start , 2 )
    return start
def square_root_iterative(a: float , max_iter: int = 9999 , tolerance: float = 1e-14 ):
    if a < 0:
        raise ValueError('''math domain error''' )
    value = get_initial_point(a )
    for _ in range(max_iter ):
        prev_value = value
        value = value - fx(value , a ) / fx_derivative(value )
        if abs(prev_value - value ) < tolerance:
            return value
    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
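
    # Spot check of the iteration (illustrative):
    for n in (2.0, 9.0, 10000.0):
        assert abs(square_root_iterative(n) - math.sqrt(n)) < 1e-9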
| 34 |
'''simple docstring'''
from __future__ import annotations
def ceil_index(v , l , r , key ): # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m # noqa: E741
    return r
def longest_increasing_subsequence_length(v: list[int] ) -> int:
    if len(v ) == 0:
        return 0
    tail = [0] * len(v )
    length = 1
    tail[0] = v[0]
    for i in range(1 , len(v ) ):
        if v[i] < tail[0]:
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            tail[length] = v[i]
            length += 1
        else:
            tail[ceil_index(tail , -1 , length - 1 , v[i] )] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
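
    # Example run (illustrative): the classic sequence has LIS length 6,
    # e.g. 2, 3, 7, 8, 10, 13.
    assert longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6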
| 34 | 1 |
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']
def pytest_collection_modifyitems( config , items ) -> Tuple:
    """simple docstring"""
    # Mark tests that are neither integration nor unit tests as unit tests.
    for item in items:
        if any(marker in item.keywords for marker in ['integration', 'unit'] ):
            continue
        item.add_marker(pytest.mark.unit )
def pytest_configure( config ) -> Any:
    """simple docstring"""
    config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=True )
def set_test_cache_config( tmp_path_factory , monkeypatch ) -> Optional[int]:
    """simple docstring"""
    test_hf_cache_home = tmp_path_factory.getbasetemp() / """cache"""
    test_hf_datasets_cache = test_hf_cache_home / """datasets"""
    test_hf_metrics_cache = test_hf_cache_home / """metrics"""
    test_hf_modules_cache = test_hf_cache_home / """modules"""
    monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(test_hf_datasets_cache ) )
    monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(test_hf_metrics_cache ) )
    monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(test_hf_modules_cache ) )
    test_downloaded_datasets_path = test_hf_datasets_cache / """downloads"""
    monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(test_downloaded_datasets_path ) )
    test_extracted_datasets_path = test_hf_datasets_cache / """downloads""" / """extracted"""
    monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(test_extracted_datasets_path ) )
@pytest.fixture(autouse=True , scope='session' )
def disable_tqdm_output( ) -> List[str]:
    """simple docstring"""
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True )
def set_update_download_counts_to_false( monkeypatch ) -> Tuple:
    """simple docstring"""
    monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , False )
@pytest.fixture
def set_sqlalchemy_silence_uber_warning( monkeypatch ) -> Dict:
    """simple docstring"""
    monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , True )
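
# Minimal sketch of the monkeypatch pattern the fixtures above rely on; the
# test body below is illustrative, not part of the real suite (the leading
# underscore keeps pytest from collecting it).
def _example_test_uses_tmp_cache(monkeypatch, tmp_path):
    monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE', str(tmp_path / 'datasets'))
    assert str(datasets.config.HF_DATASETS_CACHE).endswith('datasets')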
| 358 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class QuestionAnsweringExtractive( TaskTemplate ):
    """simple docstring"""
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default='''question-answering-extractive''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema: ClassVar[Features] = Features({'''question''': Value('''string''' ), '''context''': Value('''string''' )} )
    label_schema: ClassVar[Features] = Features(
        {
            '''answers''': Sequence(
                {
                    '''text''': Value('''string''' ),
                    '''answer_start''': Value('''int32''' ),
                } )
        } )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"
    @property
    def column_mapping( self ) -> Dict[str, str]:
        '''simple docstring'''
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
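
# Illustrative use of the template above with renamed dataset columns
# (the column names "query" and "passage" are assumptions for the example):
#   template = QuestionAnsweringExtractive(question_column="query", context_column="passage")
#   template.column_mapping  # {'query': 'question', 'passage': 'context', 'answers': 'answers'}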
| 348 | 0 |
def solution( n = 600851475143 ):
    '''simple docstring'''
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError("""Parameter n must be int or castable to int.""" )
    if n <= 0:
        raise ValueError("""Parameter n must be greater than or equal to one.""" )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
if __name__ == "__main__":
print(F'''{solution() = }''')
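
    # Small illustrative check: the prime factors of 13195 are 5, 7, 13 and 29.
    assert solution(13195) == 29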
| 88 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False
lowercase__ =logging.get_logger(__name__) # pylint: disable=invalid-name
def new_model_command_factory(args: Namespace ):
    return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class AddNewModelCommand( BaseTransformersCLICommand ):
    @staticmethod
    def register_subcommand(parser: ArgumentParser ):
        add_new_model_parser = parser.add_parser('''add-new-model''' )
        add_new_model_parser.add_argument('''--testing''' , action='''store_true''' , help='''If in testing mode.''' )
        add_new_model_parser.add_argument('''--testing_file''' , type=str , help='''Configuration file on which to run.''' )
        add_new_model_parser.add_argument(
            '''--path''' , type=str , help='''Path to cookiecutter. Should only be used for testing purposes.''' )
        add_new_model_parser.set_defaults(func=new_model_command_factory )
    def __init__(self , testing: bool , testing_file: str , path=None , *args ):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self ):
warnings.warn(
'''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
'''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
'''checks, you should use `transformers-cli add-new-model-like` instead.''' )
if not _has_cookiecutter:
raise ImportError(
'''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
'''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]]
        if len(directories ) > 0:
            raise ValueError(
                '''Several directories starting with `cookiecutter-template-` in current working directory. '''
                '''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
                '''change your working directory.''' )
        path_to_transformer_root = (
            Path(__file__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter ) )
        else:
            with open(self._testing_file , '''r''' ) as configuration_file:
                testing_configuration = json.load(configuration_file )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) , no_input=True , extra_context=testing_configuration , )
        directory = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0]
        # Retrieve configuration
        with open(directory + '''/configuration.json''' , '''r''' ) as configuration_file:
            configuration = json.load(configuration_file )
        lowercase_model_name = configuration['''lowercase_modelname''']
        generate_tensorflow_pytorch_and_flax = configuration['''generate_tensorflow_pytorch_and_flax''']
        os.remove(f"{directory}/configuration.json" )
        output_pytorch = '''PyTorch''' in generate_tensorflow_pytorch_and_flax
        output_tensorflow = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax
        output_flax = '''Flax''' in generate_tensorflow_pytorch_and_flax
        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir , exist_ok=True )
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}" , exist_ok=True )
# Tests require submodules as they have parent imports
with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py" , '''w''' ):
pass
shutil.move(
f"{directory}/__init__.py" , f"{model_dir}/__init__.py" , )
shutil.move(
f"{directory}/configuration_{lowercase_model_name}.py" , f"{model_dir}/configuration_{lowercase_model_name}.py" , )
        def remove_copy_lines(path ):
            with open(path , '''r''' ) as f:
                lines = f.readlines()
            with open(path , '''w''' ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line )
if output_pytorch:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/modeling_{lowercase_model_name}.py" , f"{model_dir}/modeling_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_{lowercase_model_name}.py" )
os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/modeling_tf_{lowercase_model_name}.py" , f"{model_dir}/modeling_tf_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_tf_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py" )
os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py" )
if output_flax:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/modeling_flax_{lowercase_model_name}.py" , f"{model_dir}/modeling_flax_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_flax_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py" )
os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/{lowercase_model_name}.md" , f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md" , )
shutil.move(
f"{directory}/tokenization_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/tokenization_fast_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}_fast.py" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str , line_to_copy_below: str , lines_to_copy: List[str] ):
            # Create temp file
            fh , abs_path = mkstemp()
            line_found = False
            with fdopen(fh , '''w''' ) as new_file:
                with open(original_file ) as old_file:
                    for line in old_file:
                        new_file.write(line )
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy )
            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file." )
            # Copy the file permissions from the old file to the new file
            copymode(original_file , abs_path )
            # Remove original file
            remove(original_file )
            # Move new file
            move(abs_path , original_file )
        def skip_units(line ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(snake_case_ : int ):
with open(snake_case_ ) as datafile:
__a : List[Any] = []
__a : int = False
__a : Tuple = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
__a : Optional[Any] = line.split('''"''' )[1]
__a : Dict = skip_units(snake_case_ )
elif "# Below: " in line and "##" not in line:
__a : str = line.split('''"''' )[1]
__a : Any = skip_units(snake_case_ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(snake_case_ , snake_case_ , snake_case_ )
__a : str = []
elif "# Replace with" in line and "##" not in line:
__a : Optional[int] = []
elif "##" not in line:
lines_to_copy.append(snake_case_ )
remove(snake_case_ )
replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py" )
os.rmdir(snake_case_ )
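# Illustration (not part of the original command): the shape of the
# to_replace_*.py file that replace_in_files() consumes, as implied by the
# parser above. Target path and snippet line below are hypothetical.
#
#   # To replace in: "src/transformers/__init__.py"
#   # Below: "# Model definitions"
#   # Replace with:
#   my_new_model_import_line
#   # End.
#
# Every non-"##" line between the markers is buffered in lines_to_copy and
# spliced by replace() directly below the "# Below:" anchor in the target file.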
| 216 | 0 |
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
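# Worked examples (these follow directly from the METRIC_CONVERSION table above):
#   volume_conversion(4, "cubicmeter", "litre")     -> 4 * 1 * 1000        = 4000.0
#   volume_conversion(1, "gallon", "litre")         -> 1 * 0.00454 * 1000  = 4.54
#   volume_conversion(1000, "litre", "cubicmeter")  -> 1000 * 0.001 * 1    = 1.0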
| 359 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
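# A tiny sanity check of color_quantize() (illustrative values, not from the
# original file): each pixel is mapped to the index of its nearest cluster.
#
#   pixels = np.array([[[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]])  # a 1x2 RGB "image"
#   clusters = np.array([[0.0, 0.0, 0.0], [0.9, 0.9, 0.9]])  # 2 palette colors
#   color_quantize(pixels, clusters)                          # -> array([0, 1])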
class ImageGPTImageProcessor(BaseImageProcessor):
    r"""
    Constructs an image processor that resizes images to a fixed size, optionally normalizes them to the range
    [-1, 1], and optionally color-quantizes them into sequences of "pixel color" cluster ids.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )
    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        # Map pixel values from [0, 255] to [-1, 1].
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")
        clusters = np.array(clusters) if clusters is not None else None

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 280 | 0 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser) -> None:
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self) -> dict:
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d: dict) -> str:
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 267 |
"""simple docstring"""
def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
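# Worked example: binary_and(25, 32).
#   25 -> "11001", 32 -> "100000"; zero-padded to 6 digits, 011001 AND 100000
#   share no set bits, so binary_and(25, 32) == "0b000000".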
| 98 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
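# Illustration of the lazy-import pattern above: importing a light symbol such
# as AltCLIPConfig does not pull in the torch-heavy modeling module; that file
# is only loaded the first time an attribute like AltCLIPModel is accessed.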
| 167 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Computes the expected height and width when feeding images to the processor with do_resize=True."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
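# Minimal usage sketch mirroring the slow tests above (paths and annotation
# contents are placeholders, not fixtures shipped with this file):
#   processor = DeformableDetrImageProcessor()
#   encoding = processor(images=img, annotations={"image_id": 1, "annotations": anns}, return_tensors="pt")
#   encoding["pixel_values"].shape  # -> (1, 3, height, width)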
| 167 | 1 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)


@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 63 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
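# Quick usage sketch (hedged: assumes the matching model classes are available
# from transformers; "original_full" is the dense, non-sparse attention mode):
#   config = BigBirdConfig(attention_type="original_full")
#   config.block_size, config.num_random_blocks  # -> (64, 3), the defaults above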
| 63 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"""Save PyTorch model to {pytorch_weights_dump_path}""")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"""Save configuration file to {pytorch_config_dump_path}""")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--openai_checkpoint_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the TensorFlow checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--openai_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
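# Example invocation (hedged: the script file name and paths below are
# placeholders for illustration):
#   python convert_openai_original_tf_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path /path/to/openai/checkpoint \
#       --pytorch_dump_folder_path /path/to/output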
| 241 |
"""simple docstring"""
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    if not isinstance(number, int):
        error_msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(error_msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
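# Worked examples (by the definition above: returns number + 2 when both number
# and number + 2 are prime, otherwise -1):
#   twin_prime(3) -> 5,  twin_prime(4) -> -1,  twin_prime(5) -> 7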
| 241 | 1 |
import math
def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0

    while start <= a:
        start = math.pow(start, 2)

    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001) -> float:
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
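# Worked example: square_root_iterative(10.0) starts from get_initial_point(10.0) = 16.0
# (2.0 -> 4.0 -> 16.0 > 10) and converges to ~3.162277660168379, matching
# math.sqrt(10) to within the default tolerance.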
| 34 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 223 | 0 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = '''
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
            '''.split()
        self.run_and_check(train_args)

        eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
            '''.split()
        self.run_and_check(eval_args)

        entropy_eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
            '''.split()
        self.run_and_check(entropy_eval_args)
| 235 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of small random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 235 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 91 |
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
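# Usage sketch (hedged: assumes the surrounding download machinery consumes
# this config object; the path is a placeholder):
#   config = DownloadConfig(cache_dir="~/.cache/my_project", max_retries=3)
#   offline = config.copy()
#   offline.local_files_only = True  # the deep copy is independent of the original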
| 217 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_upernet': ['UperNetConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
'UperNetForSemanticSegmentation',
'UperNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 210 |
def binary_multiply(a: int, b: int) -> int:
    """Multiply a and b using the Russian-peasant (binary) multiplication scheme."""
    res = 0
    while b > 0:
        if b & 1:
            res += a

        a += a
        b >>= 1

    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Multiply a and b modulo `modulus`, keeping intermediate results reduced."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus

        a += a
        b >>= 1

    return res
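# Worked example: binary_multiply(3, 5) with b = 5 = 0b101:
#   bit set   -> res = 3;  a = 6,  b = 2
#   bit clear -> res = 3;  a = 12, b = 1
#   bit set   -> res = 15; a = 24, b = 0   => 15
# binary_mod_multiply(3, 5, 7) runs the same schedule modulo 7 and returns 1.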
| 210 | 1 |
def logical_left_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
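# A few illustrative calls (hypothetical):
# >>> logical_left_shift(1, 1)
# '0b10'
# >>> logical_right_shift(1024, 1)
# '0b1000000000'
# >>> arithmetic_right_shift(-8, 1)
# '0b11100'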
| 24 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| 100 | 0 |
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
) -> None:
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
a : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--tf_checkpoint_path""",
default="""""",
type=str,
help="""An optional path to a TensorFlow checkpoint path to be converted.""",
)
parser.add_argument(
"""--transfo_xl_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--transfo_xl_dataset_file""",
default="""""",
type=str,
help="""An optional dataset file to be converted in a vocabulary.""",
)
a : Optional[Any] = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
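# Example invocation (script name and paths are placeholders):
#   python convert_transfo_xl_checkpoint.py \
#       --tf_checkpoint_path /path/to/tf_checkpoint \
#       --transfo_xl_config_file /path/to/config.json \
#       --pytorch_dump_folder_path /path/to/output \
#       --transfo_xl_dataset_file /path/to/corpus.pkl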
| 338 |
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 1_0
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
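# Deterministic sanity check (added for illustration, independent of the
# interactive demo below): both implementations agree on a small sorted list.
_sample_collection = [1, 3, 5, 7, 9, 11]
assert ite_ternary_search(_sample_collection, 7) == 3
assert rec_ternary_search(0, len(_sample_collection) - 1, _sample_collection, 7) == 3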
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)

    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
else:
print("""Not found""")
| 338 | 1 |
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Returns the maximum sum of k consecutive elements of array."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1_000, 1_000) for i in range(100)]
    k = randint(0, 110)
print(F'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
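    # Deterministic example (hypothetical): for [1, 4, 2, 10, 23, 3, 1, 0, 20]
    # and k = 4, the best window is [4, 2, 10, 23] with sum 39.
    # >>> max_sum_in_array([1, 4, 2, 10, 23, 3, 1, 0, 20], 4)
    # 39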
| 222 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
_UpperCAmelCase : List[Any] = get_logger(__name__)
_UpperCAmelCase : Tuple = R"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class lowercase :
@add_start_docstrings(A_ )
def __call__( self , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class lowercase :
@add_start_docstrings(A_ )
def __call__( self , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class lowercase ( _SCREAMING_SNAKE_CASE ):
@add_start_docstrings(A_ )
def __call__( self , A_ , A_ , A_ , **A_ ) -> jnp.ndarray:
"""simple docstring"""
for processor in self:
UpperCamelCase = inspect.signature(processor.__call__ ).parameters
if len(A_ ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
F'''{processor.__class__} are passed to the logits processor.''' )
UpperCamelCase = processor(A_ , A_ , A_ , **A_ )
else:
UpperCamelCase = processor(A_ , A_ , A_ )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> Tuple:
"""simple docstring"""
if not isinstance(A_ , A_ ) or not (temperature > 0):
raise ValueError(F'''`temperature` has to be a strictly positive float, but is {temperature}''' )
UpperCamelCase = temperature
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase = scores / self.temperature
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ = -float('Inf' ) , A_ = 1 ) -> List[Any]:
"""simple docstring"""
if not isinstance(A_ , A_ ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(A_ , A_ ) or (min_tokens_to_keep < 1):
raise ValueError(F'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
UpperCamelCase = top_p
UpperCamelCase = filter_value
UpperCamelCase = min_tokens_to_keep
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = lax.top_k(A_ , scores.shape[-1] )
UpperCamelCase = jnp.full_like(A_ , self.filter_value )
UpperCamelCase = jax.nn.softmax(A_ , axis=-1 ).cumsum(axis=-1 )
UpperCamelCase = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
UpperCamelCase = jnp.roll(A_ , 1 )
score_mask |= score_mask.at[:, 0].set(A_ )
# min tokens to keep
UpperCamelCase = score_mask.at[:, : self.min_tokens_to_keep].set(A_ )
UpperCamelCase = jnp.where(A_ , A_ , A_ )
UpperCamelCase = jax.lax.sort_key_val(A_ , A_ )[-1]
return next_scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ = -float('Inf' ) , A_ = 1 ) -> List[str]:
"""simple docstring"""
if not isinstance(A_ , A_ ) or top_k <= 0:
raise ValueError(F'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
UpperCamelCase = max(A_ , A_ )
UpperCamelCase = filter_value
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = scores.shape
UpperCamelCase = jnp.full(batch_size * vocab_size , self.filter_value )
UpperCamelCase = min(self.top_k , scores.shape[-1] ) # Safety check
UpperCamelCase , UpperCamelCase = lax.top_k(A_ , A_ )
UpperCamelCase = jnp.broadcast_to((jnp.arange(A_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
UpperCamelCase = topk_scores.flatten()
UpperCamelCase = topk_indices.flatten() + shift
UpperCamelCase = next_scores_flat.at[topk_indices_flat].set(A_ )
UpperCamelCase = next_scores_flat.reshape(A_ , A_ )
return next_scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = bos_token_id
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase = jnp.full(scores.shape , -float('inf' ) )
UpperCamelCase = 1 - jnp.bool_(cur_len - 1 )
UpperCamelCase = jnp.where(A_ , new_scores.at[:, self.bos_token_id].set(0 ) , A_ )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = max_length
UpperCamelCase = eos_token_id
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase = jnp.full(scores.shape , -float('inf' ) )
UpperCamelCase = 1 - jnp.bool_(cur_len - self.max_length + 1 )
UpperCamelCase = jnp.where(A_ , new_scores.at[:, self.eos_token_id].set(0 ) , A_ )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ ) -> Optional[Any]:
"""simple docstring"""
if not isinstance(A_ , A_ ) or min_length < 0:
raise ValueError(F'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(A_ , A_ ) or eos_token_id < 0:
raise ValueError(F'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
UpperCamelCase = min_length
UpperCamelCase = eos_token_id
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
# create boolean flag to decide if min length penalty should be applied
UpperCamelCase = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
UpperCamelCase = jnp.where(A_ , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , A_ )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = list(A_ )
UpperCamelCase = begin_index
def __call__( self , A_ , A_ , A_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = 1 - jnp.bool_(cur_len - self.begin_index )
UpperCamelCase = jnp.where(A_ , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , A_ )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = list(A_ )
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase = scores.at[..., self.suppress_tokens].set(-float('inf' ) )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = dict(A_ )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
UpperCamelCase = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
UpperCamelCase = force_token_array.at[index].set(A_ )
UpperCamelCase = jnp.intaa(A_ )
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
def _force_token(A_ ):
UpperCamelCase = scores.shape[0]
UpperCamelCase = self.force_token_array[generation_idx]
UpperCamelCase = jnp.ones_like(A_ , dtype=scores.dtype ) * -float('inf' )
UpperCamelCase = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
UpperCamelCase = lax.dynamic_update_slice(A_ , A_ , (0, current_token) )
return new_scores
UpperCamelCase = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(A_ ) , lambda: scores , ) , )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = generate_config.eos_token_id
UpperCamelCase = generate_config.no_timestamps_token_id
UpperCamelCase = generate_config.no_timestamps_token_id + 1
UpperCamelCase = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(A_ , 'max_initial_timestamp_index' ):
UpperCamelCase = generate_config.max_initial_timestamp_index
else:
UpperCamelCase = model_config.vocab_size
if self.max_initial_timestamp_index is None:
UpperCamelCase = model_config.vocab_size
def __call__( self , A_ , A_ , A_ ) -> Dict:
"""simple docstring"""
# suppress <|notimestamps|> which is handled by without_timestamps
UpperCamelCase = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) )
def handle_pairs(A_ , A_ ):
UpperCamelCase = jnp.where((cur_len - self.begin_index) >= 1 , A_ , A_ )
UpperCamelCase = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , A_ , )
UpperCamelCase = jnp.where((cur_len - self.begin_index) < 2 , A_ , A_ )
UpperCamelCase = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , A_ , A_ , )
return jnp.where(
A_ , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , A_ , )
UpperCamelCase = jax.vmap(A_ )(A_ , A_ )
UpperCamelCase = jnp.where(cur_len == self.begin_index , A_ , A_ )
UpperCamelCase = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , A_ , )
UpperCamelCase = self.timestamp_begin + self.max_initial_timestamp_index
UpperCamelCase = jnp.where(
A_ , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , A_ , )
# if sum of probability over timestamps is above any other token, sample timestamp
UpperCamelCase = jax.nn.log_softmax(A_ , axis=-1 )
def handle_cumulative_probs(A_ , A_ ):
UpperCamelCase = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
UpperCamelCase = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , A_ , )
UpperCamelCase = jax.vmap(A_ )(A_ , A_ )
return scores
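# Illustrative sketch (hypothetical, not part of the module above): the
# temperature and top-k warpers defined here reduce to the two array
# transformations below, using only `jnp` and `lax` imported at the top.
if __name__ == "__main__":
    _scores = jnp.array([[1.0, 4.0, 2.0, 0.5]])
    # temperature warping: divide the logits by a positive scalar (here 0.7)
    _warped = _scores / 0.7
    # top-k warping: keep the k highest logits and push the rest to -inf
    _top_vals, _top_idx = lax.top_k(_scores, 2)
    _filtered = jnp.full_like(_scores, -float("inf"))
    _filtered = _filtered.at[0, _top_idx[0]].set(_top_vals[0])
    print(_warped)
    print(_filtered)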
| 222 | 1 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class __UpperCAmelCase :
def __init__( self : Optional[Any], __A : Dict = "cpu", __A : Optional[int] = "openai/clip-vit-large-patch14" ):
UpperCAmelCase : Any = device
UpperCAmelCase : Union[str, Any] = CLIPTokenizerFast.from_pretrained(lowerCAmelCase_ )
UpperCAmelCase : int = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3]
UpperCAmelCase : Tuple = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1]
UpperCAmelCase : List[Any] = torchvision.transforms.Normalize(self.image_mean, self.image_std )
UpperCAmelCase : Any = torchvision.transforms.Resize(2_2_4 )
UpperCAmelCase : Tuple = torchvision.transforms.CenterCrop(2_2_4 )
def __magic_name__ ( self : List[str], __A : int ):
UpperCAmelCase : Optional[int] = self.resize(lowerCAmelCase_ )
UpperCAmelCase : Dict = self.center_crop(lowerCAmelCase_ )
UpperCAmelCase : Dict = self.normalize(lowerCAmelCase_ )
return images
def __call__( self : Optional[Any], __A : Union[str, Any]=None, __A : Any=None, **__A : List[str] ):
UpperCAmelCase : Optional[Any] = self.tokenizer(text=lowerCAmelCase_, **lowerCAmelCase_ )
UpperCAmelCase : List[str] = self.preprocess_img(lowerCAmelCase_ )
UpperCAmelCase : Dict = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class __UpperCAmelCase ( nn.Module ):
def __init__( self : Optional[int], __A : Any=1_0, __A : Tuple=0.0_1, __A : str=None, __A : str=None, __A : Union[str, Any]=None, __A : Any=None, __A : Optional[int]=None, __A : int=None, __A : Tuple=False, __A : Any=True, __A : Optional[Any]="image", __A : Tuple=True, __A : Optional[int]=False, __A : Union[str, Any]=False, __A : Any=False, ):
super().__init__()
UpperCAmelCase : Tuple = None
UpperCAmelCase : Any = device if device else get_device()
if vqgan:
UpperCAmelCase : Any = vqgan
else:
UpperCAmelCase : Dict = load_vqgan(self.device, conf_path=lowerCAmelCase_, ckpt_path=lowerCAmelCase_ )
self.vqgan.eval()
if clip:
UpperCAmelCase : Optional[int] = clip
else:
UpperCAmelCase : Union[str, Any] = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
UpperCAmelCase : Dict = ProcessorGradientFlow(device=self.device )
UpperCAmelCase : Optional[Any] = iterations
UpperCAmelCase : str = lr
UpperCAmelCase : Any = log
UpperCAmelCase : Any = make_grid
UpperCAmelCase : Optional[int] = return_val
UpperCAmelCase : Dict = quantize
UpperCAmelCase : Optional[Any] = self.vqgan.decoder.z_shape
def __magic_name__ ( self : Dict, __A : str=None, __A : Union[str, Any]=None, __A : Any=5, __A : Union[str, Any]=True ):
UpperCAmelCase : List[Any] = []
if output_path is None:
UpperCAmelCase : str = '''./animation.gif'''
if input_path is None:
UpperCAmelCase : Any = self.save_path
UpperCAmelCase : Optional[Any] = sorted(glob(input_path + '''/*''' ) )
if not len(lowerCAmelCase_ ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(lowerCAmelCase_ ) == 1:
print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
UpperCAmelCase : int = total_duration / len(lowerCAmelCase_ )
UpperCAmelCase : int = [frame_duration] * len(lowerCAmelCase_ )
if extend_frames:
UpperCAmelCase : int = 1.5
UpperCAmelCase : List[str] = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(lowerCAmelCase_ ) )
imageio.mimsave(lowerCAmelCase_, lowerCAmelCase_, duration=lowerCAmelCase_ )
print(F'''gif saved to {output_path}''' )
    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path), target_image_size=256).to(self.device)
        img = preprocess_vqgan(img)
        z, *_ = self.vqgan.encode(img)
        return z
def __magic_name__ ( self : Optional[Any], __A : Dict ):
UpperCAmelCase : Optional[Any] = self.latent.detach().requires_grad_()
UpperCAmelCase : Optional[Any] = base_latent + transform_vector
if self.quantize:
UpperCAmelCase , *UpperCAmelCase : Optional[Any] = self.vqgan.quantize(lowerCAmelCase_ )
else:
UpperCAmelCase : List[Any] = trans_latent
return self.vqgan.decode(lowerCAmelCase_ )
def __magic_name__ ( self : Dict, __A : List[str], __A : Optional[int], __A : Optional[Any]=None ):
UpperCAmelCase : List[str] = self.clip_preprocessor(text=lowerCAmelCase_, images=lowerCAmelCase_, return_tensors='''pt''', padding=lowerCAmelCase_ )
UpperCAmelCase : List[Any] = self.clip(**lowerCAmelCase_ )
UpperCAmelCase : str = clip_outputs.logits_per_image
if weights is not None:
UpperCAmelCase : Optional[Any] = similarity_logits * weights
return similarity_logits.sum()
def __magic_name__ ( self : Optional[int], __A : Optional[int], __A : List[Any], __A : Union[str, Any] ):
UpperCAmelCase : int = self._get_clip_similarity(pos_prompts['''prompts'''], lowerCAmelCase_, weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
UpperCAmelCase : Dict = self._get_clip_similarity(neg_prompts['''prompts'''], lowerCAmelCase_, weights=neg_prompts['''weights'''] )
else:
UpperCAmelCase : Union[str, Any] = torch.tensor([1], device=self.device )
UpperCAmelCase : Optional[Any] = -torch.log(lowerCAmelCase_ ) + torch.log(lowerCAmelCase_ )
return loss
def __magic_name__ ( self : Dict, __A : Optional[int], __A : Union[str, Any], __A : Dict ):
UpperCAmelCase : str = torch.randn_like(self.latent, requires_grad=lowerCAmelCase_, device=self.device )
UpperCAmelCase : str = torch.optim.Adam([vector], lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
UpperCAmelCase : List[str] = self._add_vector(lowerCAmelCase_ )
UpperCAmelCase : int = loop_post_process(lowerCAmelCase_ )
UpperCAmelCase : Optional[Any] = self._get_CLIP_loss(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
print('''CLIP loss''', lowerCAmelCase_ )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=lowerCAmelCase_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def __magic_name__ ( self : Optional[Any], __A : Union[str, Any], __A : Optional[Any], __A : Union[str, Any] ):
wandb.init(reinit=lowerCAmelCase_, project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
UpperCAmelCase : int = Image.open(lowerCAmelCase_ )
UpperCAmelCase : Optional[int] = image.resize((2_5_6, 2_5_6) )
wandb.log('''Original Image''', wandb.Image(lowerCAmelCase_ ) )
def __magic_name__ ( self : Optional[Any], __A : List[Any] ):
if not prompts:
return []
UpperCAmelCase : int = []
UpperCAmelCase : str = []
if isinstance(lowerCAmelCase_, lowerCAmelCase_ ):
UpperCAmelCase : str = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(lowerCAmelCase_, (tuple, list) ):
UpperCAmelCase : List[str] = prompt[0]
UpperCAmelCase : str = float(prompt[1] )
elif ":" in prompt:
UpperCAmelCase , UpperCAmelCase : Dict = prompt.split(''':''' )
UpperCAmelCase : List[str] = float(lowerCAmelCase_ )
else:
UpperCAmelCase : Dict = prompt
UpperCAmelCase : Any = 1.0
processed_prompts.append(lowerCAmelCase_ )
weights.append(lowerCAmelCase_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(lowerCAmelCase_, device=self.device ),
}
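    # Accepted prompt formats for the parser above (illustrative, hypothetical
    # values): a "|"-separated string with optional ":weight" suffixes, or an
    # explicit list of (prompt, weight) pairs, e.g.
    #   "a sunny beach:1.0|a stormy sky:0.5"
    #   [("a sunny beach", 1.0), ("a stormy sky", 0.5)]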
def __magic_name__ ( self : Optional[int], __A : Optional[int], __A : int=None, __A : int=None, __A : Tuple=True, __A : Tuple=False, __A : Optional[Any]=True, __A : Any=True, __A : int=None, ):
if image_path:
UpperCAmelCase : Optional[Any] = self._get_latent(lowerCAmelCase_ )
else:
UpperCAmelCase : Tuple = torch.randn(self.latent_dim, device=self.device )
if self.log:
self._init_logging(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
assert pos_prompts, "You must provide at least one positive prompt."
UpperCAmelCase : Optional[int] = self.process_prompts(lowerCAmelCase_ )
UpperCAmelCase : Dict = self.process_prompts(lowerCAmelCase_ )
if save_final and save_path is None:
UpperCAmelCase : Dict = os.path.join('''./outputs/''', '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(lowerCAmelCase_ ):
os.makedirs(lowerCAmelCase_ )
else:
UpperCAmelCase : Any = save_path + '''_''' + get_timestamp()
os.makedirs(lowerCAmelCase_ )
UpperCAmelCase : int = save_path
UpperCAmelCase : List[Any] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(lowerCAmelCase_ ) )
UpperCAmelCase : Any = loop_post_process(lowerCAmelCase_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ) ):
if show_intermediate:
show_pil(lowerCAmelCase_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path, F'''iter_{iter:03d}.png''' ) )
if self.log:
wandb.log({'''Image''': wandb.Image(lowerCAmelCase_ )} )
if show_final:
show_pil(lowerCAmelCase_ )
if save_final:
transformed_img.save(os.path.join(self.save_path, F'''iter_{iter:03d}_final.png''' ) )
| 365 |
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        # Check whether this process should emit the record
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
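# Minimal usage sketch (assumes an initialized Accelerate state, e.g. via
# `Accelerator()`; the messages themselves are placeholders):
#
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("logged once, on the main process only")
#   logger.info("logged on every process", main_process_only=False)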
| 99 | 0 |
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.60_93_44,
    "knot": 1.8_52,
}

speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.2_77_77_77_78,
    "mph": 0.6_21_37_11_92,
    "knot": 0.5_39_95_68_03,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
import doctest
doctest.testmod()
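# Worked example (hypothetical call): 100 km/h expressed in m/s.
# >>> convert_speed(100, "km/h", "m/s")
# 27.778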
| 133 |
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient of a Hermitian matrix a and vector v."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 133 | 1 |
import math
def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00_000_000_000_001
) -> float:
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
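# Quick sanity check (hypothetical call): the Newton iteration converges to
# the positive square root.
# >>> round(square_root_iterative(49), 6)
# 7.0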
| 196 |
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
| 196 | 1 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict (e.g. pytorch_model.bin) to fp16."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
| 95 |
from math import factorial
def solution(n: int = 100) -> int:
    return sum(int(x) for x in str(factorial(n)))
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
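# Worked example: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
# >>> solution(10)
# 27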
| 137 | 0 |
from collections.abc import Iterable
from typing import Any
class a_ :
'''simple docstring'''
def __init__( self , lowercase_ = None ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ = value
lowerCAmelCase_ = None # Added in order to delete a node easier
lowerCAmelCase_ = None
lowerCAmelCase_ = None
def __repr__( self ) -> str:
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f'''{self.value}''': (self.left, self.right)} , indent=1 )
class a_ :
'''simple docstring'''
def __init__( self , lowercase_ = None ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = root
def __str__( self ) -> str:
'''simple docstring'''
return str(self.root )
def _lowercase ( self , lowercase_ , lowercase_ ) -> None:
'''simple docstring'''
if new_children is not None: # reset its kids
lowerCAmelCase_ = node.parent
if node.parent is not None: # reset its parent
if self.is_right(lowercase_ ): # If it is the right children
lowerCAmelCase_ = new_children
else:
lowerCAmelCase_ = new_children
else:
lowerCAmelCase_ = new_children
def _lowercase ( self , lowercase_ ) -> bool:
'''simple docstring'''
if node.parent and node.parent.right:
return node == node.parent.right
return False
def _lowercase ( self ) -> bool:
'''simple docstring'''
return self.root is None
def _lowercase ( self , lowercase_ ) -> None:
'''simple docstring'''
lowerCAmelCase_ = Node(lowercase_ ) # create a new Node
if self.empty(): # if Tree is empty
lowerCAmelCase_ = new_node # set its root
else: # Tree is not empty
lowerCAmelCase_ = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
lowerCAmelCase_ = new_node # We insert the new node in a leaf
break
else:
lowerCAmelCase_ = parent_node.left
else:
if parent_node.right is None:
lowerCAmelCase_ = new_node
break
else:
lowerCAmelCase_ = parent_node.right
lowerCAmelCase_ = parent_node
def _lowercase ( self , *lowercase_ ) -> None:
'''simple docstring'''
for value in values:
self.__insert(lowercase_ )
def _lowercase ( self , lowercase_ ) -> Node | None:
'''simple docstring'''
if self.empty():
raise IndexError('Warning: Tree is empty! please use another.' )
else:
lowerCAmelCase_ = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
lowerCAmelCase_ = node.left if value < node.value else node.right
return node
def _lowercase ( self , lowercase_ = None ) -> Node | None:
'''simple docstring'''
if node is None:
if self.root is None:
return None
lowerCAmelCase_ = self.root
if not self.empty():
while node.right is not None:
lowerCAmelCase_ = node.right
return node
def _lowercase ( self , lowercase_ = None ) -> Node | None:
'''simple docstring'''
if node is None:
lowerCAmelCase_ = self.root
if self.root is None:
return None
if not self.empty():
lowerCAmelCase_ = self.root
while node.left is not None:
lowerCAmelCase_ = node.left
return node
def _lowercase ( self , lowercase_ ) -> None:
'''simple docstring'''
lowerCAmelCase_ = self.search(lowercase_ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(lowercase_ , lowercase_ )
elif node.left is None: # Has only right children
self.__reassign_nodes(lowercase_ , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(lowercase_ , node.left )
else:
lowerCAmelCase_ = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
lowerCAmelCase_ = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def _lowercase ( self , lowercase_ ) -> Iterable:
'''simple docstring'''
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def _lowercase ( self , lowercase_=None ) -> Any:
'''simple docstring'''
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def _lowercase ( self , lowercase_ , lowercase_ ) -> None:
'''simple docstring'''
if node:
self.inorder(lowercase_ , node.left )
arr.append(node.value )
self.inorder(lowercase_ , node.right )
def _lowercase ( self , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
lowerCAmelCase_ = []
self.inorder(lowercase_ , lowercase_ ) # append all values to list using inorder traversal
return arr[k - 1]
def lowerCamelCase ( a_ ) -> list[Node]:
lowerCAmelCase_ = []
if curr_node is not None:
lowerCAmelCase_ = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def lowerCamelCase ( ) -> None:
lowerCAmelCase_ = (8, 3, 6, 1, 10, 14, 13, 4, 7)
lowerCAmelCase_ = BinarySearchTree()
for i in testlist:
t.insert(a_ )
# Prints all the elements of the list in order traversal
print(a_ )
if t.search(6 ) is not None:
print('The value 6 exists' )
else:
print('The value 6 doesn\'t exist' )
if t.search(-1 ) is not None:
print('The value -1 exists' )
else:
print('The value -1 doesn\'t exist' )
if not t.empty():
print('Max Value: ' , t.get_max().value ) # type: ignore
print('Min Value: ' , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(a_ )
print(a_ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 14 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def lowerCamelCase ( a_ , a_ , a_=None , a_=None ) -> int:
if attention_mask is None:
lowerCAmelCase_ = tf.cast(tf.math.not_equal(a_ , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class a_ :
'''simple docstring'''
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=9_9 , lowercase_=1_6 , lowercase_=2 , lowercase_=4 , lowercase_=4 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=2_0 , lowercase_=2 , lowercase_=1 , lowercase_=0 , lowercase_=1_6 , lowercase_=1_6 , ) -> Any:
'''simple docstring'''
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = eos_token_id
lowerCAmelCase_ = pad_token_id
lowerCAmelCase_ = bos_token_id
lowerCAmelCase_ = embed_dim
lowerCAmelCase_ = word_embed_proj_dim
lowerCAmelCase_ = False
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCAmelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCAmelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCAmelCase_ = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase_ , **self.config_updates , )
lowerCAmelCase_ = prepare_opt_inputs_dict(lowercase_ , lowercase_ )
return config, inputs_dict
def _lowercase ( self , lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
lowerCAmelCase_ = TFOPTModel(config=lowercase_ )
lowerCAmelCase_ = inputs_dict['input_ids']
lowerCAmelCase_ = input_ids[:1, :]
lowerCAmelCase_ = inputs_dict['attention_mask'][:1, :]
lowerCAmelCase_ = 1
# first forward pass
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , use_cache=lowercase_ )
lowerCAmelCase_ , lowerCAmelCase_ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
lowerCAmelCase_ = tf.concat([input_ids, next_tokens] , axis=-1 )
lowerCAmelCase_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ )[0]
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowerCAmelCase_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowerCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx]
lowerCAmelCase_ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1e-3 )
@require_tf
class a_ ( a_ , a_ , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 1_0
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = TFOPTModelTester(self )
lowerCAmelCase_ = ConfigTester(self , config_class=lowercase_ )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowercase_ , lowercase_ ):
if hasattr(lowercase_ , 'weight' ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(lowercase_ , 'weight' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
# build the embeddings
lowerCAmelCase_ = model_class(config=lowercase_ )
lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() )
lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowercase_ )
lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() )
lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
lowerCAmelCase_ = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowercase_ )
# check that weights remain the same after resizing
lowerCAmelCase_ = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCAmelCase_ = False
self.assertTrue(lowercase_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowercase_ )
lowerCAmelCase_ = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCAmelCase_ = False
self.assertTrue(lowercase_ )
def lowerCamelCase ( a_ ) -> Any:
return tf.constant(a_ , dtype=tf.intaa )
@require_tf
class a_ ( unittest.TestCase ):
'''simple docstring'''
    vocab_size = 9_9
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ = tf.ones((4, 1) , dtype=tf.intaa ) * 2
lowerCAmelCase_ = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
lowerCAmelCase_ = input_ids.shape[0]
lowerCAmelCase_ = OPTConfig(
vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ = TFOPTModel.from_pretrained('facebook/opt-350m' )
lowerCAmelCase_ = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
lowerCAmelCase_ = tf.not_equal(lowercase_ , model.config.pad_token_id )
with tf.GradientTape():
lowerCAmelCase_ = model(input_ids=lowercase_ , attention_mask=lowercase_ ).last_hidden_state
lowerCAmelCase_ = (1, 1_1, 5_1_2)
self.assertEqual(output.shape , lowercase_ )
lowerCAmelCase_ = tf.constant(
[[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4e-3 ) )
lowerCAmelCase_ = tf.function(lowercase_ , jit_compile=lowercase_ )
lowerCAmelCase_ = xla_generate(lowercase_ , lowercase_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4e-2 ) )
@require_tf
@slow
class a_ ( unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
lowerCAmelCase_ = 'facebook/opt-350m'
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(self.path_model )
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(self.path_model )
lowerCAmelCase_ = [
'Today is a beautiful day and I want to',
'In the city of',
'Paris is the capital of France and',
'Computers and mobile phones have taken',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' , padding=lowercase_ , add_special_tokens=lowercase_ )
lowerCAmelCase_ = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
lowerCAmelCase_ = tf.constant(
[
[1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
[-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
[6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
] )
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-4 ) )
lowerCAmelCase_ = tf.function(lowercase_ , jit_compile=lowercase_ )
lowerCAmelCase_ = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-4 ) )
@require_tf
@slow
class a_ ( unittest.TestCase ):
'''simple docstring'''
@property
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def _lowercase ( self ) -> str:
'''simple docstring'''
lowerCAmelCase_ = 'facebook/opt-125m'
lowerCAmelCase_ = [
'Today is a beautiful day and I want to',
'In the city of New York, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
lowerCAmelCase_ = []
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ )
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ )
for prompt in self.prompts:
lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' ).input_ids
lowerCAmelCase_ = model.generate(lowercase_ , max_length=1_0 )
lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
predicted_outputs += generated_string
self.assertListEqual(lowercase_ , lowercase_ )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = 'facebook/opt-350m'
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ )
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ )
lowerCAmelCase_ = 'left'
# use different length sentences to test batching
lowerCAmelCase_ = [
'Hello, my dog is a little',
'Today, I',
]
lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' , padding=lowercase_ )
lowerCAmelCase_ = inputs['input_ids']
lowerCAmelCase_ = model.generate(input_ids=lowercase_ , attention_mask=inputs['attention_mask'] )
lowerCAmelCase_ = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
lowerCAmelCase_ = model.generate(input_ids=lowercase_ )
lowerCAmelCase_ = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['attention_mask'][-1] , tf.intaa ) )
lowerCAmelCase_ = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
lowerCAmelCase_ = model.generate(input_ids=lowercase_ , max_length=model.config.max_length - num_paddings )
lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
lowerCAmelCase_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase_ )
lowerCAmelCase_ = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase_ )
lowerCAmelCase_ = [
'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
'Today, I was in the middle of a conversation with a friend about the',
]
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(lowercase_ , [non_padded_sentence, padded_sentence] )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = 'facebook/opt-350m'
lowerCAmelCase_ = [
'Today is a beautiful day and I want to',
'In the city of San Francisco, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
lowerCAmelCase_ = []
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ )
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ )
for prompt in self.prompts:
lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' ).input_ids
lowerCAmelCase_ = model.generate(lowercase_ , max_length=1_0 )
lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
predicted_outputs += generated_string
self.assertListEqual(lowercase_ , lowercase_ )
| 14 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase__ = logging.get_logger(__name__)
def squared_euclidean_distance(a, b) ->Optional[Any]:
    b = b.T
    aa = np.sum(np.square(a), axis=1)
    ba = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d


def color_quantize(x, clusters) ->Any:
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
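# Tiny sanity check (hypothetical values): each RGB pixel is assigned to its
# nearest cluster centroid, here black vs. white.
# >>> _clusters = np.array([[0.0, 0.0, 0.0], [255.0, 255.0, 255.0]])
# >>> _pixels = np.array([[10.0, 10.0, 10.0], [250.0, 240.0, 245.0]])
# >>> color_quantize(_pixels, _clusters)
# array([0, 1])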
class __snake_case ( snake_case__ ):
a__ = ['''pixel_values''']
def __init__( self , lowercase = None , lowercase = True , lowercase = None , lowercase = PILImageResampling.BILINEAR , lowercase = True , lowercase = True , **lowercase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**_A)
a__: Optional[Any] = size if size is not None else {'height': 2_56, 'width': 2_56}
a__: Any = get_size_dict(_A)
a__: List[str] = np.array(_A) if clusters is not None else None
a__: int = do_resize
a__: Union[str, Any] = size
a__: Dict = resample
a__: List[Any] = do_normalize
a__: str = do_color_quantize
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase = PILImageResampling.BILINEAR , lowercase = None , **lowercase , ) -> int:
'''simple docstring'''
a__: Optional[Any] = get_size_dict(_A)
if "height" not in size or "width" not in size:
raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}')
return resize(
_A , size=(size['height'], size['width']) , resample=_A , data_format=_A , **_A)
def lowerCamelCase_ ( self , lowercase , lowercase = None , ) -> Dict:
'''simple docstring'''
a__: Dict = rescale(image=_A , scale=1 / 1_27.5 , data_format=_A)
a__: str = image - 1
return image
def lowerCamelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ) -> Union[str, Any]:
'''simple docstring'''
a__: Any = do_resize if do_resize is not None else self.do_resize
a__: Dict = size if size is not None else self.size
a__: Union[str, Any] = get_size_dict(_A)
a__: Optional[Any] = resample if resample is not None else self.resample
a__: Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
a__: Optional[int] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
a__: List[str] = clusters if clusters is not None else self.clusters
a__: str = np.array(_A)
a__: int = make_list_of_images(_A)
if not valid_images(_A):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.')
if do_color_quantize and clusters is None:
raise ValueError('Clusters must be specified if do_color_quantize is True.')
# All transformations expect numpy arrays.
a__: str = [to_numpy_array(_A) for image in images]
if do_resize:
a__: int = [self.resize(image=_A , size=_A , resample=_A) for image in images]
if do_normalize:
a__: List[Any] = [self.normalize(image=_A) for image in images]
if do_color_quantize:
a__: List[Any] = [to_channel_dimension_format(_A , ChannelDimension.LAST) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
a__: List[Any] = np.array(_A)
a__: str = color_quantize(_A , _A).reshape(images.shape[:-1])
# flatten to (batch_size, height*width)
a__: Dict = images.shape[0]
a__: List[Any] = images.reshape(_A , -1)
# We need to convert back to a list of images to keep consistent behaviour across processors.
a__: Union[str, Any] = list(_A)
else:
a__: Any = [to_channel_dimension_format(_A , _A) for image in images]
a__: List[Any] = {'input_ids': images}
return BatchFeature(data=_A , tensor_type=_A)
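# A minimal usage sketch (illustrative only; the random `demo_clusters` below
# stand in for the real ImageGPT color palette that ships with trained
# checkpoints):
#
#   import numpy as np
#   from PIL import Image
#
#   demo_clusters = np.random.uniform(-1, 1, size=(512, 3))  # placeholder palette
#   processor = ImageGPTImageProcessor(clusters=demo_clusters, size={"height": 32, "width": 32})
#   encoding = processor(images=Image.new("RGB", (64, 64)), return_tensors="np")
#   print(encoding["input_ids"].shape)  # (1, 1024): one cluster id per pixel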
| 290 |
import argparse
import json
from tqdm import tqdm
def main() -> None:
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set", type=str, help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path", type=str, help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
main()
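# Example invocation (script name and data paths are placeholders):
#
#   python parse_dpr_relevance_data.py \
#       --src_path biencoder-nq-dev.json \
#       --evaluation_set nq_dev.questions \
#       --gold_data_path nq_dev.gold
#
# Each line of the evaluation set holds one question; the corresponding line
# of the gold file holds the tab-separated titles of its positive contexts.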
| 280 | 0 |
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(self, image, points_per_batch=64, crops_n_layers: int = 0, crop_overlap_ratio: float = 512 / 1500, points_per_crop: Optional[int] = 32, crop_n_points_downscale_factor: Optional[int] = 1):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(self, model_inputs, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0], iou_scores[0], original_sizes[0], input_boxes[0], pred_iou_thresh, stability_score_thresh, mask_threshold, stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(self, model_outputs, output_rle_mask=False, output_bboxes_mask=False, crops_nms_thresh=0.7):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 349 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True, num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False, class_cost=1, bbox_cost=5, giou_cost=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
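# A minimal usage sketch: building a randomly initialized model from this
# config (the reduced sizes are illustrative, not a released checkpoint):
#
#   from transformers import YolosConfig, YolosModel
#
#   config = YolosConfig(hidden_size=192, num_hidden_layers=3, num_attention_heads=3)
#   model = YolosModel(config)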
| 349 | 1 |
"""simple docstring"""
import argparse
import json
import os

import fairseq
import torch
from torch import nn

from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wav2vec2_checkpoint(checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers):
    """
    Copy/paste/tweak the fairseq model's weights to the transformers design.
    """
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_02_24, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
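# Example invocation (script name and all paths are placeholders for local
# fairseq artifacts):
#
#   python convert_wav2vec2_seq2seq_checkpoint.py \
#       --checkpoint_path ./checkpoint_best.pt \
#       --dict_path ./dict.txt \
#       --pytorch_dump_folder_path ./converted_model \
#       --vocab_size 10224 --num_decoder_layers 7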
| 221 |
"""simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")
class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name, since it inspects the forward signature
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
| 221 | 1 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 368 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
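# A minimal usage sketch, assuming the "Salesforce/blip-image-captioning-base"
# checkpoint; text-only calls return token ids, while image (+ optional text)
# calls also add "pixel_values" to the batch:
#
#   from transformers import BlipProcessor
#
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")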
| 54 | 0 |
"""simple docstring"""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
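# Example invocation via python-fire (tokenizer name and data path illustrative):
#
#   python save_len_file.py t5-small /path/to/seq2seq_data --max_source_length 512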
| 255 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=2, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, cls_loss_coefficient=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, focal_alpha=0.25, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
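# A minimal usage sketch: the ONNX config above exports with dynamic axes for
# `pixel_values` and `pixel_mask`; the model config itself can be customized
# the usual way (values illustrative):
#
#   from transformers import ConditionalDetrConfig
#
#   config = ConditionalDetrConfig(num_queries=50, backbone="resnet50")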
| 89 | 0 |
'''simple docstring'''
from __future__ import annotations
class Matrix:
    def __init__(self, rows):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self):
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self):
        return len(self.rows)

    @property
    def num_columns(self):
        return len(self.rows[0])

    @property
    def order(self):
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self):
        return self.order[0] == self.order[1]

    def identity(self):
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self):
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self):
        return bool(self.determinant())

    def get_minor(self, row, column):
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row, column):
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self):
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self):
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self):
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self):
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self):
        return str(self.rows)

    def __str__(self):
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row, position=None):
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column, position=None):
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other):
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other):
        return not self == other

    def __neg__(self):
        return self * -1

    def __add__(self, other):
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other):
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other):
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other):
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row, column):
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
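# A small worked example of the operations above; note that scalar
# multiplication truncates to int, so exact inverses require a determinant
# of +/-1:
#
#   >>> m = Matrix([[1, 1], [0, 1]])
#   >>> m.determinant()
#   1
#   >>> m.inverse().rows
#   [[1, -1], [0, 1]]
#   >>> (m * m.inverse()) == m.identity()
#   True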
| 25 |
'''simple docstring'''
def solution() -> int:
    """Count the Sundays that fell on the first of the month in 1901-2000."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # 6 January 1901 was the first Sunday of the 20th century
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
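# Sanity check: 1 January 1901 fell on a Tuesday, so the scan starts on
# Sunday 6 January 1901 and steps a week at a time; the published Project
# Euler answer for 1901-2000 is 171:
#
#   >>> solution()
#   171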
| 25 | 1 |
"""simple docstring"""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length,
            )
    return result


def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
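# The problem statement notes there are exactly 120 reversible numbers below
# one thousand; restricting the search to at most three digits reproduces it:
#
#   >>> solution(max_power=3)
#   120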
| 183 |
"""simple docstring"""
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # keeps the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
    node_count = int(input('''Enter the number of nodes: ''').strip() or 0)
if node_count <= 0:
raise ValueError('''We need some nodes to work with.''')
print(
F'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
F'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
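# Worked example: for 3 nodes, C(6, 3) // 4 = 20 // 4 = 5 binary search trees,
# and 5 * 3! = 30 distinct labelled binary trees:
#
#   >>> catalan_number(3)
#   5
#   >>> binary_tree_count(3)
#   30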
| 183 | 1 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
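# A minimal usage sketch (argument names and version string illustrative):
# `deprecate` pops the old kwarg, warns, and returns its value so callers can
# keep supporting it for one more release:
#
#   def my_function(new_arg=None, **kwargs):
#       old_arg = deprecate("old_arg", "1.0.0", "Use `new_arg` instead.", take_from=kwargs)
#       new_arg = new_arg if new_arg is not None else old_arg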
| 256 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")
if __name__ == "__main__":
main()
| 256 | 1 |
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, "w") as fp:
fp.write("\n".join(merges))
    tokenizer = FSMTTokenizer(
langs=["en", "ru"],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=["ru", "en"],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f'num of params {tiny_model.num_parameters()}')
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 247 |
"""simple docstring"""
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
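# Worked example (mass-action law: n * p = n_i**2): with hole_conc=25 and
# intrinsic_conc=10, the missing electron concentration is 10**2 / 25 = 4:
#
#   >>> carrier_concentration(electron_conc=0, hole_conc=25, intrinsic_conc=10)
#   ('electron_conc', 4.0)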
| 247 | 1 |
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    """Sigmoid activation: 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.array) -> np.array:
    """Sigmoid linear unit (SiLU / fast-GELU approximation): x * sigmoid(1.702 * x)."""
    return vector * sigmoid(1.702 * vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
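# Worked example: sigmoid(0) = 1 / (1 + e**0) = 0.5, so the sigmoid linear
# unit maps 0 to 0 * 0.5 = 0 and approaches the identity for large inputs:
#
#   >>> sigmoid(np.array([0.0]))
#   array([0.5])
#   >>> sigmoid_linear_unit(np.array([0.0]))
#   array([0.])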
| 122 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = """\
@inproceedings{snover-etal-2006-study,
title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",
author = \"Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John\",
booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",
month = aug # \" 8-12\",
year = \"2006\",
address = \"Cambridge, Massachusetts, USA\",
publisher = \"Association for Machine Translation in the Americas\",
url = \"https://aclanthology.org/2006.amta-papers.25\",
pages = \"223--231\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
_DESCRIPTION = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""
_KWARGS_DESCRIPTION = """
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
'score' (float): TER score (num_edits / sum_ref_lengths * 100)
'num_edits' (int): The cumulative number of edits
'ref_length' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}
Example 2:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}
Example 3:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}
Example 4:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}
Example 5:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __SCREAMING_SNAKE_CASE ( datasets.Metric):
def UpperCamelCase__ ( self ):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='http://www.cs.umd.edu/~snover/tercom/' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'] , reference_urls=[
'https://github.com/jhclark/tercom',
] , )
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , ):
"""simple docstring"""
lowerCAmelCase__ = len(references[0] )
if any(len(_UpperCamelCase ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
lowerCAmelCase__ = [[refs[i] for refs in references] for i in range(_UpperCamelCase )]
lowerCAmelCase__ = TER(
normalized=_UpperCamelCase , no_punct=_UpperCamelCase , asian_support=_UpperCamelCase , case_sensitive=_UpperCamelCase , )
lowerCAmelCase__ = sb_ter.corpus_score(_UpperCamelCase , _UpperCamelCase )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
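# A minimal illustration (not part of the metric): sacrebleu keeps one stream per
# reference position, while the compute method above expects one list of
# references per prediction, so a transpose converts between the two layouts.
#
#   sacrebleu_refs = [["ref A for pred 1", "ref A for pred 2"],
#                     ["ref B for pred 1", "ref B for pred 2"]]
#   per_prediction_refs = [list(refs) for refs in zip(*sacrebleu_refs)]
#   # -> [["ref A for pred 1", "ref B for pred 1"],
#   #     ["ref A for pred 2", "ref B for pred 2"]]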
| 122 | 1 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
lowercase__ : Union[str, Any] = data_utils.TransfoXLTokenizer
lowercase__ : Tuple = data_utils.TransfoXLCorpus
lowercase__ : Union[str, Any] = data_utils
lowercase__ : Union[str, Any] = data_utils
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]:
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(snake_case__ , '''rb''' ) as fp:
lowerCAmelCase = pickle.load(snake_case__ , encoding='''latin1''' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
lowerCAmelCase = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file''']
print(f"Save vocabulary to {pytorch_vocab_dump_path}" )
lowerCAmelCase = corpus.vocab.__dict__
torch.save(snake_case__ , snake_case__ )
lowerCAmelCase = corpus.__dict__
corpus_dict_no_vocab.pop('''vocab''' , snake_case__ )
lowerCAmelCase = pytorch_dump_folder_path + '''/''' + CORPUS_NAME
print(f"Save dataset to {pytorch_dataset_dump_path}" )
torch.save(snake_case__ , snake_case__ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
lowerCAmelCase = os.path.abspath(snake_case__ )
lowerCAmelCase = os.path.abspath(snake_case__ )
print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}." )
# Initialise PyTorch model
if transfo_xl_config_file == "":
lowerCAmelCase = TransfoXLConfig()
else:
lowerCAmelCase = TransfoXLConfig.from_json_file(snake_case__ )
print(f"Building PyTorch model from configuration: {config}" )
lowerCAmelCase = TransfoXLLMHeadModel(snake_case__ )
lowerCAmelCase = load_tf_weights_in_transfo_xl(snake_case__ , snake_case__ , snake_case__ )
# Save pytorch-model
lowerCAmelCase = os.path.join(snake_case__ , snake_case__ )
lowerCAmelCase = os.path.join(snake_case__ , snake_case__ )
print(f"Save PyTorch model to {os.path.abspath(snake_case__ )}" )
torch.save(model.state_dict() , snake_case__ )
print(f"Save configuration file to {os.path.abspath(snake_case__ )}" )
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowercase__ : str = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
            '''An optional config json file corresponding to the pre-trained Transformer-XL model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
        help='''An optional dataset file to be converted into a vocabulary.''',
)
lowercase__ : Optional[int] = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
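# Example invocation sketch (the script name and paths below are hypothetical
# placeholders, not values shipped with this module):
#   python <this_script> \
#       --tf_checkpoint_path ./transfo_xl_model.ckpt \
#       --transfo_xl_config_file ./transfo_xl_config.json \
#       --pytorch_dump_folder_path ./pytorch_dump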
| 338 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = ["""image_processor""", """tokenizer"""]
UpperCAmelCase_ : int = """OwlViTImageProcessor"""
UpperCAmelCase_ : Any = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->Any:
lowerCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __SCREAMING_SNAKE_CASE , )
lowerCAmelCase = kwargs.pop('''feature_extractor''' )
lowerCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="max_length" , __SCREAMING_SNAKE_CASE="np" , **__SCREAMING_SNAKE_CASE ) ->int:
        if text is None and query_images is None and images is None:
            raise ValueError(
                '''You have to specify at least one of `text`, `query_images` or `images`. All three cannot be None.''' )
if text is not None:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) or (isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not isinstance(text[0] , __SCREAMING_SNAKE_CASE )):
lowerCAmelCase = [self.tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )]
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(text[0] , __SCREAMING_SNAKE_CASE ):
lowerCAmelCase = []
# Maximum number of queries across batch
lowerCAmelCase = max([len(__SCREAMING_SNAKE_CASE ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__SCREAMING_SNAKE_CASE ) != max_num_queries:
lowerCAmelCase = t + [''' '''] * (max_num_queries - len(__SCREAMING_SNAKE_CASE ))
lowerCAmelCase = self.tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
encodings.append(__SCREAMING_SNAKE_CASE )
else:
raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' )
if return_tensors == "np":
lowerCAmelCase = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
lowerCAmelCase = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
lowerCAmelCase = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
lowerCAmelCase = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
lowerCAmelCase = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 )
lowerCAmelCase = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
lowerCAmelCase = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
lowerCAmelCase = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
else:
raise ValueError('''Target return tensor type could not be returned''' )
lowerCAmelCase = BatchEncoding()
lowerCAmelCase = input_ids
lowerCAmelCase = attention_mask
if query_images is not None:
lowerCAmelCase = BatchEncoding()
lowerCAmelCase = self.image_processor(
__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).pixel_values
lowerCAmelCase = query_pixel_values
if images is not None:
lowerCAmelCase = self.image_processor(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if text is not None and images is not None:
lowerCAmelCase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
lowerCAmelCase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__SCREAMING_SNAKE_CASE ) , tensor_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Optional[int]:
return self.image_processor.post_process(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Any:
return self.image_processor.post_process_object_detection(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Tuple:
return self.image_processor.post_process_image_guided_detection(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->str:
return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->List[Any]:
return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __SCREAMING_SNAKE_CASE , )
return self.image_processor
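# A short usage sketch (the checkpoint name is the public OWL-ViT base model;
# `image` stands in for any PIL image):
#   from transformers import OwlViTProcessor
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                      images=image, return_tensors="pt")
#   # inputs now carries input_ids, attention_mask and pixel_values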
| 338 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : List[str] = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : int = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
_lowerCamelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
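# Note: with this lazy layout, importing the package is cheap; the torch-backed
# classes listed under `_import_structure` are only materialized on first
# attribute access, and are skipped entirely when torch is unavailable.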
| 206 |
from random import randint, random
def __lowerCamelCase (UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : int = 5 , ):
SCREAMING_SNAKE_CASE = [[-1] * number_of_cells] # Create a highway without any car
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = max(UpperCAmelCase__ , 0 )
while i < number_of_cells:
SCREAMING_SNAKE_CASE = (
randint(0 , UpperCAmelCase__ ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def __lowerCamelCase (UpperCAmelCase__ : list , UpperCAmelCase__ : int ):
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = highway_now[car_index + 1 :]
for cell in range(len(UpperCAmelCase__ ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(UpperCAmelCase__ , -1 )
def __lowerCamelCase (UpperCAmelCase__ : list , UpperCAmelCase__ : float , UpperCAmelCase__ : int ):
SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ )
    # Before calculations, the next highway is empty
SCREAMING_SNAKE_CASE = [-1] * number_of_cells
for car_index in range(UpperCAmelCase__ ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
SCREAMING_SNAKE_CASE = min(highway_now[car_index] + 1 , UpperCAmelCase__ )
            # Number of empty cells before the next car
SCREAMING_SNAKE_CASE = get_distance(UpperCAmelCase__ , UpperCAmelCase__ ) - 1
# We can't have the car causing an accident
SCREAMING_SNAKE_CASE = min(next_highway[car_index] , UpperCAmelCase__ )
if random() < probability:
# Randomly, a driver will slow down
SCREAMING_SNAKE_CASE = max(next_highway[car_index] - 1 , 0 )
return next_highway
def __lowerCamelCase (UpperCAmelCase__ : list , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : int ):
SCREAMING_SNAKE_CASE = len(highway[0] )
for i in range(UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE = update(highway[i] , UpperCAmelCase__ , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = [-1] * number_of_cells
for car_index in range(UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
SCREAMING_SNAKE_CASE = (car_index + speed) % number_of_cells
# Commit the change of position
SCREAMING_SNAKE_CASE = speed
highway.append(UpperCAmelCase__ )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
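# End-to-end sketch of the intended usage, with descriptive names assumed for
# the three helpers above (construct_highway, update, simulate):
#   highway = construct_highway(number_of_cells=50, frequency=5, initial_speed=2)
#   history = simulate(highway, number_of_update=10, probability=0.1, max_speed=5)
#   # Each entry of `history` is one step of the highway; -1 marks an empty
#   # cell and any other value is that car's current speed.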
| 206 | 1 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def __lowercase ( _A , _A , _A ) -> Any:
# Initialise PyTorch model
SCREAMING_SNAKE_CASE : Any = AlbertConfig.from_json_file(_A )
print(F"Building PyTorch model from configuration: {config}" )
SCREAMING_SNAKE_CASE : str = AlbertForPreTraining(_A )
# Load weights from tf checkpoint
load_tf_weights_in_albert(_A , _A , _A )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , _A )
if __name__ == "__main__":
UpperCAmelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase__ : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
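# Invocation sketch (all three arguments are required here; the paths are
# hypothetical placeholders):
#   python <this_script> \
#       --tf_checkpoint_path ./model.ckpt-best \
#       --albert_config_file ./albert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin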
| 245 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase__ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : str = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class a__ ( UpperCAmelCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] ="""table-transformer"""
UpperCAmelCase__ : Union[str, Any] =["""past_key_values"""]
UpperCAmelCase__ : Any ={
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Tuple , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : int=3 , UpperCAmelCase__ : Any=1_0_0 , UpperCAmelCase__ : Optional[Any]=6 , UpperCAmelCase__ : Dict=2_0_4_8 , UpperCAmelCase__ : Any=8 , UpperCAmelCase__ : List[str]=6 , UpperCAmelCase__ : Union[str, Any]=2_0_4_8 , UpperCAmelCase__ : int=8 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : Optional[Any]=0.0 , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Dict="relu" , UpperCAmelCase__ : List[Any]=2_5_6 , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : Optional[int]=0.0 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : List[str]=1.0 , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : int="sine" , UpperCAmelCase__ : Dict="resnet50" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : Union[str, Any]=1 , UpperCAmelCase__ : List[Any]=5 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Any=1 , UpperCAmelCase__ : Any=1 , UpperCAmelCase__ : int=5 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : int=0.1 , **UpperCAmelCase__ : Union[str, Any] , ) ->Dict:
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
SCREAMING_SNAKE_CASE : str = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE : Optional[int] = backbone_config.get("""model_type""" )
SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE : Optional[Any] = config_class.from_dict(UpperCAmelCase__ )
# set timm attributes to None
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = None, None, None
SCREAMING_SNAKE_CASE : List[Any] = use_timm_backbone
SCREAMING_SNAKE_CASE : List[Any] = backbone_config
SCREAMING_SNAKE_CASE : Dict = num_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = num_queries
SCREAMING_SNAKE_CASE : Optional[Any] = d_model
SCREAMING_SNAKE_CASE : Any = encoder_ffn_dim
SCREAMING_SNAKE_CASE : str = encoder_layers
SCREAMING_SNAKE_CASE : List[str] = encoder_attention_heads
SCREAMING_SNAKE_CASE : Tuple = decoder_ffn_dim
SCREAMING_SNAKE_CASE : Optional[Any] = decoder_layers
SCREAMING_SNAKE_CASE : str = decoder_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = dropout
SCREAMING_SNAKE_CASE : List[Any] = attention_dropout
SCREAMING_SNAKE_CASE : Optional[int] = activation_dropout
SCREAMING_SNAKE_CASE : Tuple = activation_function
SCREAMING_SNAKE_CASE : int = init_std
SCREAMING_SNAKE_CASE : str = init_xavier_std
SCREAMING_SNAKE_CASE : Tuple = encoder_layerdrop
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_layerdrop
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_layers
SCREAMING_SNAKE_CASE : Any = auxiliary_loss
SCREAMING_SNAKE_CASE : List[Any] = position_embedding_type
SCREAMING_SNAKE_CASE : List[Any] = backbone
SCREAMING_SNAKE_CASE : Optional[Any] = use_pretrained_backbone
SCREAMING_SNAKE_CASE : Optional[Any] = dilation
# Hungarian matcher
SCREAMING_SNAKE_CASE : List[Any] = class_cost
SCREAMING_SNAKE_CASE : Tuple = bbox_cost
SCREAMING_SNAKE_CASE : Dict = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE : Dict = mask_loss_coefficient
SCREAMING_SNAKE_CASE : Optional[Any] = dice_loss_coefficient
SCREAMING_SNAKE_CASE : Tuple = bbox_loss_coefficient
SCREAMING_SNAKE_CASE : List[Any] = giou_loss_coefficient
SCREAMING_SNAKE_CASE : List[Any] = eos_coefficient
super().__init__(is_encoder_decoder=UpperCAmelCase__ , **UpperCAmelCase__ )
@property
def _lowercase ( self : List[str] ) ->int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def _lowercase ( self : Any ) ->int:
"""simple docstring"""
return self.d_model
class a__ ( UpperCAmelCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] =version.parse("""1.11""" )
@property
def _lowercase ( self : Union[str, Any] ) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def _lowercase ( self : Optional[Any] ) ->float:
"""simple docstring"""
return 1e-5
@property
def _lowercase ( self : Tuple ) ->int:
"""simple docstring"""
return 1_2
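# Instantiation sketch (public class name `TableTransformerConfig` assumed;
# values are illustrative): the defaults use a timm ResNet-50 backbone, while
# `use_timm_backbone=False` switches to a transformers backbone config instead.
#   config = TableTransformerConfig(num_queries=100, use_timm_backbone=False)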
| 245 | 1 |
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class UpperCamelCase_ :
__magic_name__ = None
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase_ : Any = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
UpperCAmelCase_ : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ : str = os.path.join(lowerCAmelCase_ , "feat_extract.json" )
feat_extract_first.to_json_file(lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = self.feature_extraction_class.from_json_file(lowerCAmelCase_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _SCREAMING_SNAKE_CASE ( self : int ) -> str:
UpperCAmelCase_ : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ : Optional[Any] = feat_extract_first.save_pretrained(lowerCAmelCase_ )[0]
check_json_file_has_correct_format(lowerCAmelCase_ )
UpperCAmelCase_ : str = self.feature_extraction_class.from_pretrained(lowerCAmelCase_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
UpperCAmelCase_ : List[Any] = self.feature_extraction_class()
self.assertIsNotNone(lowerCAmelCase_ )
| 370 |
"""simple docstring"""
def snake_case ( A__ = 10_00 ):
UpperCAmelCase_ : Optional[Any] = 2**power
UpperCAmelCase_ : Optional[int] = str(A__ )
UpperCAmelCase_ : Tuple = list(A__ )
UpperCAmelCase_ : Any = 0
for i in list_num:
sum_of_num += int(A__ )
return sum_of_num
if __name__ == "__main__":
lowerCamelCase_ = int(input('''Enter the power of 2: ''').strip())
print('''2 ^ ''', power, ''' = ''', 2**power)
lowerCamelCase_ = solution(power)
print('''Sum of the digits is: ''', result)
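# Worked example: for power = 15, 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26;
# the default power = 1000 yields 1366.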
| 253 | 0 |
from __future__ import annotations
import os
from collections.abc import Mapping
__lowerCAmelCase = tuple[int, int]
class __a :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
'''simple docstring'''
lowercase__: set[int] = vertices
lowercase__: dict[EdgeT, int] = {
(min(lowerCAmelCase__ ), max(lowerCAmelCase__ )): weight for edge, weight in edges.items()
}
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
'''simple docstring'''
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
lowercase__: Optional[int] = weight
def SCREAMING_SNAKE_CASE__ ( self ) -> Graph:
'''simple docstring'''
lowercase__: Graph = Graph({min(self.vertices )} , {} )
lowercase__: EdgeT
lowercase__: int
lowercase__: EdgeT
lowercase__: int
while len(subgraph.vertices ) < len(self.vertices ):
lowercase__: Dict = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
lowercase__: Dict = edge
lowercase__: List[str] = weight
subgraph.add_edge(lowerCAmelCase__ , lowerCAmelCase__ )
return subgraph
def snake_case_ ( snake_case = "p107_network.txt" ) -> int:
lowercase__: str = os.path.abspath(os.path.dirname(snake_case ) )
lowercase__: str = os.path.join(snake_case , snake_case )
lowercase__: dict[EdgeT, int] = {}
lowercase__: list[str]
lowercase__: int
lowercase__: int
with open(snake_case ) as f:
lowercase__: List[Any] = f.read().strip().split('\n' )
        lowercase__: Any = [line.split(',' ) for line in data]
    for edgea in range(1 , len(snake_case ) ):
        for edgea in range(snake_case ):
            if adjacency_matrix[edgea][edgea] != "-":
                lowercase__: Tuple = int(adjacency_matrix[edgea][edgea] )
lowercase__: Graph = Graph(set(range(len(snake_case ) ) ) , snake_case )
lowercase__: Graph = graph.prims_algorithm()
lowercase__: int = sum(graph.edges.values() )
lowercase__: int = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(F'''{solution() = }''')
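# The network file is a comma-separated adjacency matrix in which "-" marks a
# missing edge; a hypothetical 3-vertex example:
#   -,16,12
#   16,-,17
#   12,17,-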
| 196 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __a ( __UpperCamelCase , unittest.TestCase ):
__lowercase : str = CpmAntTokenizer
__lowercase : List[Any] = False
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
super().setUp()
lowercase__: Any = [
'<d>',
'</d>',
'<s>',
'</s>',
'</_>',
'<unk>',
'<pad>',
'</n>',
'我',
'是',
'C',
'P',
'M',
'A',
'n',
't',
]
lowercase__: List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
@tooslow
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__: Optional[int] = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b' )
lowercase__: Optional[Any] = '今天天气真好!'
lowercase__: str = ['今天', '天气', '真', '好', '!']
lowercase__: Optional[Any] = tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase__: List[str] = '今天天气真好!'
lowercase__: List[str] = [tokenizer.bos_token] + tokens
lowercase__: Tuple = [6, 9_802, 14_962, 2_082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ )
lowercase__: Any = tokenizer.decode(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
| 196 | 1 |
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = DistilBertTokenizer
UpperCamelCase = DistilBertTokenizerFast
UpperCamelCase = True
@slow
def lowercase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )
UpperCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=_UpperCAmelCase )
UpperCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=_UpperCAmelCase )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 241 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowercase__ :
'''simple docstring'''
def __init__( self : Any , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : Any=True , _UpperCAmelCase : int=False , _UpperCAmelCase : str=10 , _UpperCAmelCase : int=3 , _UpperCAmelCase : Optional[int]=32 * 8 , _UpperCAmelCase : str=32 * 8 , _UpperCAmelCase : Any=4 , _UpperCAmelCase : List[Any]=64 , ) -> str:
'''simple docstring'''
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_auxiliary_loss
UpperCAmelCase_ = num_queries
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = min_size
UpperCAmelCase_ = max_size
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = hidden_dim
UpperCAmelCase_ = hidden_dim
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_UpperCAmelCase )
UpperCAmelCase_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_UpperCAmelCase )
UpperCAmelCase_ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_UpperCAmelCase ) > 0.5
).float()
UpperCAmelCase_ = (torch.rand((self.batch_size, self.num_labels) , device=_UpperCAmelCase ) > 0.5).long()
UpperCAmelCase_ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
UpperCAmelCase_ = self.num_queries
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = [1, 1, 1, 1]
UpperCAmelCase_ = self.num_channels
UpperCAmelCase_ = 64
UpperCAmelCase_ = 128
UpperCAmelCase_ = self.hidden_dim
UpperCAmelCase_ = self.hidden_dim
UpperCAmelCase_ = self.hidden_dim
return config
def lowercase__ ( self : Dict ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = output.encoder_hidden_states
UpperCAmelCase_ = output.pixel_decoder_hidden_states
UpperCAmelCase_ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_UpperCAmelCase ) , config.decoder_layers )
def lowercase__ ( self : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=False ) -> str:
'''simple docstring'''
with torch.no_grad():
UpperCAmelCase_ = MaskaFormerModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase )
UpperCAmelCase_ = model(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_UpperCAmelCase , _UpperCAmelCase )
def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = MaskaFormerForUniversalSegmentation(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
def comm_check_on_output(_UpperCAmelCase : List[Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCAmelCase_ = model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase )
UpperCAmelCase_ = model(_UpperCAmelCase )
comm_check_on_output(_UpperCAmelCase )
UpperCAmelCase_ = model(
pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase )
comm_check_on_output(_UpperCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowercase__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
UpperCamelCase = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = MaskaFormerModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : Dict ) -> str:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
def lowercase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_UpperCAmelCase )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def lowercase__ ( self : Tuple ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def lowercase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def lowercase__ ( self : str ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowercase__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
def lowercase__ ( self : Tuple ) -> str:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_UpperCAmelCase )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
@slow
def lowercase__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
UpperCAmelCase_ = MaskaFormerModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def lowercase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = (self.model_tester.min_size,) * 2
UpperCAmelCase_ = {
"pixel_values": torch.randn((2, 3, *size) , device=_UpperCAmelCase ),
"mask_labels": torch.randn((2, 10, *size) , device=_UpperCAmelCase ),
"class_labels": torch.zeros(2 , 10 , device=_UpperCAmelCase ).long(),
}
UpperCAmelCase_ = self.model_tester.get_config()
UpperCAmelCase_ = MaskaFormerForUniversalSegmentation(_UpperCAmelCase ).to(_UpperCAmelCase )
UpperCAmelCase_ = model(**_UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_UpperCAmelCase ).to(_UpperCAmelCase )
UpperCAmelCase_ = model(**_UpperCAmelCase , output_attentions=_UpperCAmelCase )
self.assertTrue(outputs.attentions is not None )
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCAmelCase_ = self.all_model_classes[1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
UpperCAmelCase_ = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase ).loss
loss.backward()
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.all_model_classes[1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(_UpperCAmelCase ).to(_UpperCAmelCase )
model.train()
UpperCAmelCase_ = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase )
UpperCAmelCase_ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCAmelCase_ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
UpperCAmelCase_ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCAmelCase_ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_UpperCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCamelCase = 1e-4
def a__ ( ):
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowercase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
UpperCAmelCase_ = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
UpperCAmelCase_ = model(**_UpperCAmelCase )
UpperCAmelCase_ = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
UpperCAmelCase_ = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
UpperCAmelCase_ = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def lowercase__ ( self : int ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ).eval()
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
UpperCAmelCase_ = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
UpperCAmelCase_ = model(**_UpperCAmelCase )
# masks_queries_logits
UpperCAmelCase_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
UpperCAmelCase_ = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
UpperCAmelCase_ = torch.tensor(_UpperCAmelCase ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
# class_queries_logits
UpperCAmelCase_ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
UpperCAmelCase_ = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ).eval()
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , )
UpperCAmelCase_ = inputs["pixel_values"].to(_UpperCAmelCase )
UpperCAmelCase_ = [el.to(_UpperCAmelCase ) for el in inputs["mask_labels"]]
UpperCAmelCase_ = [el.to(_UpperCAmelCase ) for el in inputs["class_labels"]]
with torch.no_grad():
UpperCAmelCase_ = model(**_UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
| 241 | 1 |
from collections.abc import Iterable
from typing import Any
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCAmelCase__ : int | None = None) ->Union[str, Any]:
'''simple docstring'''
A__ = value
A__ = None # Added in order to delete a node easier
A__ = None
A__ = None
def __repr__( self : Optional[Any]) ->str:
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value)
return pformat({f"""{self.value}""": (self.left, self.right)} , indent=1)
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self : Tuple , UpperCAmelCase__ : Node | None = None) ->Optional[Any]:
'''simple docstring'''
A__ = root
def __str__( self : List[str]) ->str:
'''simple docstring'''
return str(self.root)
def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : Node , UpperCAmelCase__ : Node | None) ->None:
'''simple docstring'''
if new_children is not None: # reset its kids
A__ = node.parent
if node.parent is not None: # reset its parent
if self.is_right(UpperCAmelCase__): # If it is the right children
A__ = new_children
else:
A__ = new_children
else:
A__ = new_children
def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : Node) ->bool:
'''simple docstring'''
if node.parent and node.parent.right:
return node == node.parent.right
return False
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->bool:
'''simple docstring'''
return self.root is None
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Tuple) ->None:
'''simple docstring'''
A__ = Node(UpperCAmelCase__) # create a new Node
if self.empty(): # if Tree is empty
A__ = new_node # set its root
else: # Tree is not empty
A__ = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
A__ = new_node # We insert the new node in a leaf
break
else:
A__ = parent_node.left
else:
if parent_node.right is None:
A__ = new_node
break
else:
A__ = parent_node.right
A__ = parent_node
def SCREAMING_SNAKE_CASE ( self : Optional[int] , *UpperCAmelCase__ : Optional[Any]) ->None:
'''simple docstring'''
for value in values:
self.__insert(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : List[str]) ->Node | None:
'''simple docstring'''
if self.empty():
            raise IndexError('''Warning: Tree is empty! Please insert values before searching.''')
else:
A__ = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
A__ = node.left if value < node.value else node.right
return node
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Node | None = None) ->Node | None:
'''simple docstring'''
if node is None:
if self.root is None:
return None
A__ = self.root
if not self.empty():
while node.right is not None:
A__ = node.right
return node
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Node | None = None) ->Node | None:
'''simple docstring'''
if node is None:
A__ = self.root
if self.root is None:
return None
if not self.empty():
A__ = self.root
while node.left is not None:
A__ = node.left
return node
def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : int) ->None:
'''simple docstring'''
A__ = self.search(UpperCAmelCase__) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(UpperCAmelCase__ , UpperCAmelCase__)
elif node.left is None: # Has only right children
self.__reassign_nodes(UpperCAmelCase__ , node.right)
elif node.right is None: # Has only left children
self.__reassign_nodes(UpperCAmelCase__ , node.left)
else:
A__ = self.get_max(
node.left) # Gets the max value of the left branch
self.remove(tmp_node.value) # type: ignore
A__ = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : Node | None) ->Iterable:
'''simple docstring'''
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left)
yield from self.preorder_traverse(node.right)
def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Any=None) ->Any:
'''simple docstring'''
if traversal_function is None:
return self.preorder_traverse(self.root)
else:
return traversal_function(self.root)
def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : list , UpperCAmelCase__ : Node | None) ->None:
'''simple docstring'''
if node:
self.inorder(UpperCAmelCase__ , node.left)
arr.append(node.value)
self.inorder(UpperCAmelCase__ , node.right)
def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Node) ->int:
'''simple docstring'''
A__ = []
self.inorder(UpperCAmelCase__ , UpperCAmelCase__) # append all values to list using inorder traversal
return arr[k - 1]
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> list[Node]:
"""simple docstring"""
A__ = []
if curr_node is not None:
A__ = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def SCREAMING_SNAKE_CASE ( ) -> None:
"""simple docstring"""
A__ = (8, 3, 6, 1, 10, 14, 13, 4, 7)
A__ = BinarySearchTree()
for i in testlist:
t.insert(lowercase_ )
# Prints all the elements of the list in order traversal
print(lowercase_ )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''' , t.get_max().value ) # type: ignore
print('''Min Value: ''' , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(lowercase_ )
print(lowercase_ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
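# Sketch of the k-th smallest lookup provided by the final method of the tree
# class (descriptive name `kth_smallest` assumed for readability):
#   t = BinarySearchTree()
#   t.insert(8, 3, 10)
#   t.kth_smallest(2, t.root)  # inorder is [3, 8, 10], so this returns 8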
| 14 |
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any:
"""simple docstring"""
A__ = [0] * len(lowercase_ )
A__ = []
A__ = [1] * len(lowercase_ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(lowercase_ ) ):
if indegree[i] == 0:
queue.append(lowercase_ )
while queue:
A__ = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
A__ = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(lowercase_ )
print(max(lowercase_ ) )
# Adjacency list of Graph
_lowerCamelCase : Optional[int] = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
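# For the sample DAG above the printed result is 5: the longest path visits
# five vertices, e.g. 0 -> 2 -> 5 -> 6 -> 7.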
| 14 | 1 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( __a , unittest.TestCase ):
"""simple docstring"""
__lowercase : Union[str, Any] = XGLMTokenizer
__lowercase : int = XGLMTokenizerFast
__lowercase : Optional[Any] = True
__lowercase : str = True
def snake_case_ ( self):
super().setUp()
# We have a SentencePiece fixture for testing
__SCREAMING_SNAKE_CASE = XGLMTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__)
tokenizer.save_pretrained(self.tmpdirname)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = """<pad>"""
__SCREAMING_SNAKE_CASE = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__) , lowerCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__) , lowerCAmelCase__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , """<s>""")
self.assertEqual(vocab_keys[1] , """<pad>""")
self.assertEqual(len(lowerCAmelCase__) , 1_0_0_8)
def snake_case_ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_8)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = XGLMTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""This is a test""")
self.assertListEqual(lowerCAmelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
__SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def snake_case_ ( self):
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""")
def snake_case_ ( self):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase__ , f.name)
__SCREAMING_SNAKE_CASE = XGLMTokenizer(f.name , keep_accents=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = pickle.dumps(lowerCAmelCase__)
pickle.loads(lowerCAmelCase__)
def snake_case_ ( self):
if not self.test_rust_tokenizer:
return
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE = """I was born in 92000, and this is falsé."""
__SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
@slow
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = """Hello World!"""
__SCREAMING_SNAKE_CASE = [2, 3_1_2_2_7, 4_4_4_7, 3_5]
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__))
@slow
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
__SCREAMING_SNAKE_CASE = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5]
# fmt: on
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__))
@slow
def snake_case_ ( self):
# fmt: off
__SCREAMING_SNAKE_CASE = {
"""input_ids""": [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="""facebook/xglm-564M""" , padding=lowerCAmelCase__ , )
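# Standalone usage sketch (illustrative, outside the test class): encode a
# sentence with the public checkpoint; the expected ids match the first slow
# test above. Assumes network access to the Hub.
if __name__ == "__main__":
    tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    ids = tokenizer.encode("Hello World!")
    print(ids)  # [2, 31227, 4447, 35]
    print(tokenizer.decode(ids, skip_special_tokens=True))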
| 255 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
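# A stripped-down sketch of the lazy-import pattern _LazyModule implements
# (an assumed simplification; the real class in transformers.utils also
# handles __dir__, pickling and module specs). Attribute access is what
# triggers the heavy submodule import:
import importlib
import types


class _MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported name to the submodule that defines it.
        self._class_to_module = {cls: mod for mod, classes in import_structure.items() for cls in classes}

    def __getattr__(self, attr):
        submodule = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(submodule, attr)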
| 255 | 1 |
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
a__ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase_)
class UpperCAmelCase__ ( UpperCAmelCase_):
def __init__( self , **lowercase ) -> Union[str, Any]:
super().__init__(**lowercase )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
self.check_model_type(lowercase )
def __lowerCamelCase ( self , **lowercase ) -> Union[str, Any]:
__UpperCamelCase = {}
__UpperCamelCase = {}
__UpperCamelCase = {}
# preprocess args
if "points_per_batch" in kwargs:
__UpperCamelCase = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
__UpperCamelCase = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
__UpperCamelCase = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
__UpperCamelCase = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
__UpperCamelCase = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
__UpperCamelCase = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
__UpperCamelCase = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
__UpperCamelCase = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
__UpperCamelCase = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
__UpperCamelCase = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
__UpperCamelCase = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
__UpperCamelCase = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self , lowercase , *lowercase , lowercase=None , lowercase=None , **lowercase ) -> Union[str, Any]:
return super().__call__(lowercase , *lowercase , num_workers=lowercase , batch_size=lowercase , **lowercase )
def __lowerCamelCase ( self , lowercase , lowercase=6_4 , lowercase = 0 , lowercase = 5_1_2 / 1_5_0_0 , lowercase = 3_2 , lowercase = 1 , ) -> Union[str, Any]:
__UpperCamelCase = load_image(lowercase )
__UpperCamelCase = self.image_processor.size["""longest_edge"""]
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.image_processor.generate_crop_boxes(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
__UpperCamelCase = self.image_processor(images=lowercase , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
__UpperCamelCase = self.get_inference_context()
with inference_context():
__UpperCamelCase = self._ensure_tensor_on_device(lowercase , device=self.device )
__UpperCamelCase = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
__UpperCamelCase = image_embeddings
__UpperCamelCase = grid_points.shape[1]
__UpperCamelCase = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , lowercase , lowercase ):
__UpperCamelCase = grid_points[:, i : i + points_per_batch, :, :]
__UpperCamelCase = input_labels[:, i : i + points_per_batch]
__UpperCamelCase = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def __lowerCamelCase ( self , lowercase , lowercase=0.88 , lowercase=0.95 , lowercase=0 , lowercase=1 , ) -> Dict:
__UpperCamelCase = model_inputs.pop("""input_boxes""" )
__UpperCamelCase = model_inputs.pop("""is_last""" )
__UpperCamelCase = model_inputs.pop("""original_sizes""" ).tolist()
__UpperCamelCase = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
__UpperCamelCase = self.model(**lowercase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
__UpperCamelCase = model_outputs["""pred_masks"""]
__UpperCamelCase = self.image_processor.post_process_masks(
lowercase , lowercase , lowercase , lowercase , binarize=lowercase )
__UpperCamelCase = model_outputs["""iou_scores"""]
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowercase , lowercase , lowercase , lowercase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def __lowerCamelCase ( self , lowercase , lowercase=False , lowercase=False , lowercase=0.7 , ) -> List[Any]:
__UpperCamelCase = []
__UpperCamelCase = []
__UpperCamelCase = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
__UpperCamelCase = torch.cat(lowercase )
__UpperCamelCase = torch.cat(lowercase )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.image_processor.post_process_for_mask_generation(
lowercase , lowercase , lowercase , lowercase )
__UpperCamelCase = defaultdict(lowercase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(lowercase )
__UpperCamelCase = {}
if output_rle_mask:
__UpperCamelCase = rle_mask
if output_bboxes_mask:
__UpperCamelCase = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 349 |
'''simple docstring'''
import logging
import os
from .state import PartialState
class UpperCAmelCase__ ( logging.LoggerAdapter):
@staticmethod
def __lowerCamelCase ( lowercase ) -> Dict:
__UpperCamelCase = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def __lowerCamelCase ( self , lowercase , lowercase , *lowercase , **lowercase ) -> List[str]:
if PartialState._shared_state == {}:
raise RuntimeError(
"""You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.""" )
__UpperCamelCase = kwargs.pop("""main_process_only""" , lowercase )
__UpperCamelCase = kwargs.pop("""in_order""" , lowercase )
if self.isEnabledFor(lowercase ):
if self._should_log(lowercase ):
__UpperCamelCase , __UpperCamelCase = self.process(lowercase , lowercase )
self.logger.log(lowercase , lowercase , *lowercase , **lowercase )
elif in_order:
__UpperCamelCase = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
__UpperCamelCase , __UpperCamelCase = self.process(lowercase , lowercase )
self.logger.log(lowercase , lowercase , *lowercase , **lowercase )
state.wait_for_everyone()
def _lowercase ( __A ,__A = None ):
'''simple docstring'''
if log_level is None:
__UpperCamelCase = os.environ.get("""ACCELERATE_LOG_LEVEL""" ,__A )
__UpperCamelCase = logging.getLogger(__A )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(__A ,{} )
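# Minimal usage sketch: the adapter logs once from the main process by
# default, or from every rank in rank order with in_order=True. A
# PartialState must exist first, as the RuntimeError above enforces.
if __name__ == "__main__":
    from accelerate import Accelerator

    accelerator = Accelerator()
    logger = get_logger(__name__, log_level="INFO")
    logger.info("printed once, from the main process")
    logger.info("printed by every rank, in order", main_process_only=False, in_order=True)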
| 349 | 1 |
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
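# Minimal usage sketch (illustrative; PipelineTool.__call__ chains
# encode -> forward -> decode, downloading the default checkpoint on first
# use; the silent input here is just a stand-in for real audio):
if __name__ == "__main__":
    import numpy as np

    transcriber = SpeechToTextTool()
    audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
    print(transcriber(audio))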
| 361 |
from __future__ import annotations
def is_palindrome(n) -> bool:
    """Return True if the digits of n read the same forwards and backwards."""
    s = str(n)
    return s == s[::-1]


def solution(limit: int = 1_000_000) -> int:
    """Return the sum of all numbers below `limit` that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(input().strip())))
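# Hand-checked examples: below 10 the double-base palindromes are 1, 3, 5, 7
# and 9 (every even number ends in 0 in binary), so solution(10) == 25;
# 585 also qualifies, since 585 == 0b1001001001.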
| 143 | 0 |
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    """Segment tree over a sequence, combining values with a binary function
    such as operator.add, max or min."""

    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Set the i-th element to val and recompute the affected path."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Combine the values over the inclusive index range [i, j]."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        """Breadth-first traversal yielding every node."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7 (for operator.add)
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
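# Design note: building the tree touches every element once (O(n)), while
# update and query_range each walk root-to-leaf paths, so both run in
# O(log n) per call.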
| 99 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
def UpperCAmelCase_ ( self : Optional[int] ) -> List[str]:
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
__SCREAMING_SNAKE_CASE = [[1, 2, 4], [1, 2, 3, 4]]
__SCREAMING_SNAKE_CASE = DisjunctiveConstraint(UpperCAmelCase__ )
self.assertTrue(isinstance(dc.token_ids , UpperCAmelCase__ ) )
with self.assertRaises(UpperCAmelCase__ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(UpperCAmelCase__ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def UpperCAmelCase_ ( self : Any ) -> int:
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
__SCREAMING_SNAKE_CASE = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(UpperCAmelCase__ ):
DisjunctiveConstraint(UpperCAmelCase__ ) # fails here
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
__SCREAMING_SNAKE_CASE = [[1, 2, 3], [1, 2, 4]]
__SCREAMING_SNAKE_CASE = DisjunctiveConstraint(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 )
__SCREAMING_SNAKE_CASE = stepped is True and completed is False and reset is False
self.assertTrue(UpperCAmelCase__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 )
__SCREAMING_SNAKE_CASE = stepped is True and completed is False and reset is False
self.assertTrue(UpperCAmelCase__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(3 )
__SCREAMING_SNAKE_CASE = stepped is True and completed is True and reset is False
self.assertTrue(UpperCAmelCase__ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def UpperCAmelCase_ ( self : str ) -> List[str]:
__SCREAMING_SNAKE_CASE = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__SCREAMING_SNAKE_CASE = DisjunctiveConstraint(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
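# Standalone walk-through mirroring the asserts above: the constraint keeps
# advancing along whichever branch still matches the generated tokens.
if __name__ == "__main__":
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for token in (1, 2, 4):
        stepped, completed, reset = dc.update(token)
    print(dc.current_seq, dc.completed)  # [1, 2, 4] True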
| 54 | 0 |
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class _UpperCAmelCase ( nn.Module ):
a : int
a : int
a : float =0.0
a : int =1
a : int =1
a : bool =True
a : bool =False
a : bool =False
a : bool =False
a : jnp.dtype =jnp.floataa
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = []
__lowerCAmelCase = []
for i in range(self.num_layers ):
__lowerCAmelCase = self.in_channels if i == 0 else self.out_channels
__lowerCAmelCase = FlaxResnetBlockaD(
in_channels=__SCREAMING_SNAKE_CASE,out_channels=self.out_channels,dropout_prob=self.dropout,dtype=self.dtype,)
resnets.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = FlaxTransformeraDModel(
in_channels=self.out_channels,n_heads=self.num_attention_heads,d_head=self.out_channels // self.num_attention_heads,depth=1,use_linear_projection=self.use_linear_projection,only_cross_attention=self.only_cross_attention,use_memory_efficient_attention=self.use_memory_efficient_attention,dtype=self.dtype,)
attentions.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = resnets
__lowerCAmelCase = attentions
if self.add_downsample:
__lowerCAmelCase = FlaxDownsampleaD(self.out_channels,dtype=self.dtype )
def __call__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=True ):
'''simple docstring'''
__lowerCAmelCase = ()
for resnet, attn in zip(self.resnets,self.attentions ):
__lowerCAmelCase = resnet(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,deterministic=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = attn(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,deterministic=__SCREAMING_SNAKE_CASE )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCAmelCase = self.downsamplers_a(__SCREAMING_SNAKE_CASE )
output_states += (hidden_states,)
return hidden_states, output_states
class _UpperCAmelCase ( nn.Module ):
a : int
a : int
a : float =0.0
a : int =1
a : bool =True
a : jnp.dtype =jnp.floataa
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = []
for i in range(self.num_layers ):
__lowerCAmelCase = self.in_channels if i == 0 else self.out_channels
__lowerCAmelCase = FlaxResnetBlockaD(
in_channels=__SCREAMING_SNAKE_CASE,out_channels=self.out_channels,dropout_prob=self.dropout,dtype=self.dtype,)
resnets.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = resnets
if self.add_downsample:
__lowerCAmelCase = FlaxDownsampleaD(self.out_channels,dtype=self.dtype )
def __call__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=True ):
'''simple docstring'''
__lowerCAmelCase = ()
for resnet in self.resnets:
__lowerCAmelCase = resnet(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,deterministic=__SCREAMING_SNAKE_CASE )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCAmelCase = self.downsamplers_a(__SCREAMING_SNAKE_CASE )
output_states += (hidden_states,)
return hidden_states, output_states
class _UpperCAmelCase ( nn.Module ):
a : int
a : int
a : int
a : float =0.0
a : int =1
a : int =1
a : bool =True
a : bool =False
a : bool =False
a : bool =False
a : jnp.dtype =jnp.floataa
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = []
__lowerCAmelCase = []
for i in range(self.num_layers ):
__lowerCAmelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCAmelCase = self.prev_output_channel if i == 0 else self.out_channels
__lowerCAmelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels,out_channels=self.out_channels,dropout_prob=self.dropout,dtype=self.dtype,)
resnets.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = FlaxTransformeraDModel(
in_channels=self.out_channels,n_heads=self.num_attention_heads,d_head=self.out_channels // self.num_attention_heads,depth=1,use_linear_projection=self.use_linear_projection,only_cross_attention=self.only_cross_attention,use_memory_efficient_attention=self.use_memory_efficient_attention,dtype=self.dtype,)
attentions.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = resnets
__lowerCAmelCase = attentions
if self.add_upsample:
__lowerCAmelCase = FlaxUpsampleaD(self.out_channels,dtype=self.dtype )
def __call__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=True ):
'''simple docstring'''
for resnet, attn in zip(self.resnets,self.attentions ):
# pop res hidden states
__lowerCAmelCase = res_hidden_states_tuple[-1]
__lowerCAmelCase = res_hidden_states_tuple[:-1]
__lowerCAmelCase = jnp.concatenate((hidden_states, res_hidden_states),axis=-1 )
__lowerCAmelCase = resnet(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,deterministic=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = attn(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,deterministic=__SCREAMING_SNAKE_CASE )
if self.add_upsample:
__lowerCAmelCase = self.upsamplers_a(__SCREAMING_SNAKE_CASE )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
a : int
a : int
a : int
a : float =0.0
a : int =1
a : bool =True
a : jnp.dtype =jnp.floataa
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = []
for i in range(self.num_layers ):
__lowerCAmelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCAmelCase = self.prev_output_channel if i == 0 else self.out_channels
__lowerCAmelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels,out_channels=self.out_channels,dropout_prob=self.dropout,dtype=self.dtype,)
resnets.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = resnets
if self.add_upsample:
__lowerCAmelCase = FlaxUpsampleaD(self.out_channels,dtype=self.dtype )
def __call__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=True ):
'''simple docstring'''
for resnet in self.resnets:
# pop res hidden states
__lowerCAmelCase = res_hidden_states_tuple[-1]
__lowerCAmelCase = res_hidden_states_tuple[:-1]
__lowerCAmelCase = jnp.concatenate((hidden_states, res_hidden_states),axis=-1 )
__lowerCAmelCase = resnet(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,deterministic=__SCREAMING_SNAKE_CASE )
if self.add_upsample:
__lowerCAmelCase = self.upsamplers_a(__SCREAMING_SNAKE_CASE )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
a : int
a : float =0.0
a : int =1
a : int =1
a : bool =False
a : bool =False
a : jnp.dtype =jnp.floataa
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = [
FlaxResnetBlockaD(
in_channels=self.in_channels,out_channels=self.in_channels,dropout_prob=self.dropout,dtype=self.dtype,)
]
__lowerCAmelCase = []
for _ in range(self.num_layers ):
__lowerCAmelCase = FlaxTransformeraDModel(
in_channels=self.in_channels,n_heads=self.num_attention_heads,d_head=self.in_channels // self.num_attention_heads,depth=1,use_linear_projection=self.use_linear_projection,use_memory_efficient_attention=self.use_memory_efficient_attention,dtype=self.dtype,)
attentions.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = FlaxResnetBlockaD(
in_channels=self.in_channels,out_channels=self.in_channels,dropout_prob=self.dropout,dtype=self.dtype,)
resnets.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = resnets
__lowerCAmelCase = attentions
def __call__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=True ):
'''simple docstring'''
__lowerCAmelCase = self.resnets[0](__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
for attn, resnet in zip(self.attentions,self.resnets[1:] ):
__lowerCAmelCase = attn(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,deterministic=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = resnet(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,deterministic=__SCREAMING_SNAKE_CASE )
return hidden_states
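# Note: the five classes above all surface here under the placeholder name
# _UpperCAmelCase; in the upstream diffusers file they are, in definition
# order, FlaxCrossAttnDownBlock2D, FlaxDownBlock2D, FlaxCrossAttnUpBlock2D,
# FlaxUpBlock2D and FlaxUNetMidBlock2DCrossAttn (an assumed mapping). A
# minimal init sketch for the plain down block, with hypothetical sizes:
#
#   import jax
#   import jax.numpy as jnp
#
#   block = FlaxDownBlock2D(in_channels=32, out_channels=64, num_layers=2)
#   sample = jnp.ones((1, 8, 8, 32))   # NHWC, as Flax convolutions expect
#   temb = jnp.ones((1, 128))          # hypothetical time-embedding width
#   params = block.init(jax.random.PRNGKey(0), sample, temb)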
| 46 |
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _UpperCAmelCase ( lowerCAmelCase_ ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self._create_example_records()
__lowerCAmelCase = Dataset.from_list(__SCREAMING_SNAKE_CASE )
self.assertListEqual(dset.column_names,["""col_1""", """col_2"""] )
for i, r in enumerate(__SCREAMING_SNAKE_CASE ):
self.assertDictEqual(__SCREAMING_SNAKE_CASE,example_records[i] )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self._create_example_records()
__lowerCAmelCase = Dataset.from_list(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info,dset_from_dict.info )
def lowerCamelCase__ ( self ): # checks what happens with missing columns
'''simple docstring'''
__lowerCAmelCase = [{"""col_1""": 1}, {"""col_2""": """x"""}]
__lowerCAmelCase = Dataset.from_list(__SCREAMING_SNAKE_CASE )
self.assertDictEqual(dset[0],{"""col_1""": 1} )
self.assertDictEqual(dset[1],{"""col_1""": None} ) # NB: first record is used for columns
def lowerCamelCase__ ( self ): # checks if the type can be inferred from the second record
'''simple docstring'''
__lowerCAmelCase = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
__lowerCAmelCase = Dataset.from_list(__SCREAMING_SNAKE_CASE )
self.assertEqual(dset.info.features["""col_1"""],Sequence(Value("""int64""" ) ) )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = Dataset.from_list([] )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ),0 )
self.assertListEqual(dset.column_names,[] )
| 46 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    """Configuration class to store the configuration of a LUKE model."""

    model_type = "luke"

    def __init__(
        self,
        vocab_size=50_267,
        entity_vocab_size=500_000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
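# Minimal usage sketch:
if __name__ == "__main__":
    config = LukeConfig(entity_emb_size=128)
    print(config.model_type, config.entity_vocab_size)  # luke 500000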
| 25 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the command line options for the launch helper."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the spawned processes see the training script's own CLI.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
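# Invocation sketch (hypothetical script name):
#
#   python xla_spawn.py --num_cores 8 my_training_script.py --foo bar
#
# spawns my_training_script._mp_fn on 8 TPU cores, with sys.argv patched to
# ["my_training_script.py", "--foo", "bar", "--tpu_num_cores", "8"].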
| 25 | 1 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    """Count the parameters that require gradients."""
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by"
            " adding to this function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 1 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 , lowercase__ : str = "bert-base-cased" ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained(lowercase__ )
lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase__ : List[str] ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ :str = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase_ :str = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowercase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ :List[str] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase__ , padding="""max_length""" , max_length=1_2_8 , return_tensors="""pt""" )
return tokenizer.pad(lowercase__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
lowerCAmelCase_ :Optional[int] = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
lowerCAmelCase_ :Any = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : Tuple , lowercase__ : int ) -> List[str]:
'''simple docstring'''
model.eval()
lowerCAmelCase_ :Dict = 0
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowercase__ ) - 1:
lowerCAmelCase_ :Optional[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowerCAmelCase_ :Any = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
lowerCAmelCase_ :Tuple = metric.compute()
return eval_metric["accuracy"]
def _snake_case ( lowercase__ : str , lowercase__ : List[str] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ :int = config["""lr"""]
lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] )
lowerCAmelCase_ :Optional[int] = int(config["""seed"""] )
lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] )
lowerCAmelCase_ :Optional[Any] = args.model_name_or_path
set_seed(lowercase__ )
lowerCAmelCase_ , lowerCAmelCase_ :Dict = get_dataloaders(lowercase__ , lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ :str = AutoModelForSequenceClassification.from_pretrained(lowercase__ , return_dict=lowercase__ )
# Instantiate optimizer
lowerCAmelCase_ :List[str] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCAmelCase_ :str = optimizer_cls(params=model.parameters() , lr=lowercase__ )
if accelerator.state.deepspeed_plugin is not None:
lowerCAmelCase_ :Union[str, Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
lowerCAmelCase_ :Any = 1
lowerCAmelCase_ :str = (len(lowercase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCAmelCase_ :List[str] = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=0 , num_training_steps=lowercase__ , )
else:
lowerCAmelCase_ :int = DummyScheduler(lowercase__ , total_num_steps=lowercase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# We need to keep track of how many total steps we have iterated over
lowerCAmelCase_ :List[str] = 0
    # We also need to keep track of the starting epoch so files are named properly
lowerCAmelCase_ :List[Any] = 0
lowerCAmelCase_ :str = evaluate.load("""glue""" , """mrpc""" )
lowerCAmelCase_ :Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
lowerCAmelCase_ :Dict = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
lowerCAmelCase_ :Optional[Any] = args.resume_from_checkpoint.split("""epoch_""" )[1]
lowerCAmelCase_ :int = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
lowerCAmelCase_ :Union[str, Any] = int(lowercase__ ) + 1
lowerCAmelCase_ :Optional[int] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
accelerator.print("""resumed checkpoint performance:""" , lowercase__ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , """r""" ) as f:
lowerCAmelCase_ :List[str] = json.load(lowercase__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
lowerCAmelCase_ :List[Any] = {}
for epoch in range(lowercase__ , lowercase__ ):
model.train()
for step, batch in enumerate(lowercase__ ):
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Dict = outputs.loss
lowerCAmelCase_ :int = loss / gradient_accumulation_steps
accelerator.backward(lowercase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
lowerCAmelCase_ :List[str] = f"""epoch_{epoch}"""
lowerCAmelCase_ :Any = os.path.join(args.output_dir , lowercase__ )
accelerator.save_state(lowercase__ )
lowerCAmelCase_ :List[Any] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
lowerCAmelCase_ :Union[str, Any] = accuracy
lowerCAmelCase_ :Any = lr_scheduler.get_lr()[0]
lowerCAmelCase_ :str = optimizer.param_groups[0]["""lr"""]
lowerCAmelCase_ :List[Any] = epoch
lowerCAmelCase_ :Tuple = overall_step
accelerator.print(f"""epoch {epoch}:""" , lowercase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
def _snake_case ( ) -> int:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=lowercase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowercase__ , )
parser.add_argument(
"""--output_dir""" , type=lowercase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=lowercase__ , default=lowercase__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=lowercase__ , default=lowercase__ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowercase__ , default=2 , help="""Number of train epochs.""" , )
lowerCAmelCase_ :Optional[int] = parser.parse_args()
lowerCAmelCase_ :List[Any] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 4_2, """batch_size""": 1_6}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
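# Launch sketch (flags mirror the argparse above; paths are hypothetical):
#
#   accelerate launch checkpointing.py --output_dir ./ckpts --num_epochs 2
#   accelerate launch checkpointing.py --output_dir ./ckpts \
#       --resume_from_checkpoint ./ckpts/epoch_0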
| 1 | 1 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(
        self,
        path: str,
        mode: str = "rb",
        **kwargs,
    ):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
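# Usage sketch (illustrative; assumes a DatasetInfo fetched from the Hub):
#
#   fs = HfFileSystem(repo_info=dataset_info, token=token)
#   print(fs.ls(""))                  # entries built from repo_info.siblings
#   with fs.open("data/train.csv") as f:
#       first_line = f.readline()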
| 256 |
"""simple docstring"""
def pancake_sort(arr):
    """Sort a list by repeatedly flipping prefixes (pancake sort)."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, bringing the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole unsorted prefix, dropping the maximum into place
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
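# Worked example: pancake_sort([3, 1, 2]) returns [1, 2, 3]. Each pass uses at
# most two flips (maximum to the front, then into its final slot), so the sort
# needs O(n) flips and O(n^2) comparisons overall.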
| 256 | 1 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCAmelCase : Dict , lowerCAmelCase : Dict=13 , lowerCAmelCase : int=7 , lowerCAmelCase : Any=True , lowerCAmelCase : str=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : str=True , lowerCAmelCase : Tuple=99 , lowerCAmelCase : int=64 , lowerCAmelCase : Any=32 , lowerCAmelCase : str=5 , lowerCAmelCase : List[str]=4 , lowerCAmelCase : str=37 , lowerCAmelCase : Union[str, Any]="gelu" , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : Optional[int]=5_12 , lowerCAmelCase : List[str]=16 , lowerCAmelCase : str=2 , lowerCAmelCase : Union[str, Any]=0.02 , lowerCAmelCase : Dict=3 , lowerCAmelCase : int=4 , lowerCAmelCase : Union[str, Any]=None , ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = parent
__lowerCAmelCase : Tuple = batch_size
__lowerCAmelCase : Dict = seq_length
__lowerCAmelCase : List[str] = is_training
__lowerCAmelCase : Dict = use_input_mask
__lowerCAmelCase : Optional[int] = use_token_type_ids
__lowerCAmelCase : List[str] = use_labels
__lowerCAmelCase : Dict = vocab_size
__lowerCAmelCase : List[str] = hidden_size
__lowerCAmelCase : Optional[int] = embedding_size
__lowerCAmelCase : Optional[int] = num_hidden_layers
__lowerCAmelCase : Optional[Any] = num_attention_heads
__lowerCAmelCase : Optional[Any] = intermediate_size
__lowerCAmelCase : Optional[int] = hidden_act
__lowerCAmelCase : Any = hidden_dropout_prob
__lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
__lowerCAmelCase : List[str] = max_position_embeddings
__lowerCAmelCase : Optional[Any] = type_vocab_size
__lowerCAmelCase : Optional[Any] = type_sequence_label_size
__lowerCAmelCase : Optional[Any] = initializer_range
__lowerCAmelCase : Optional[Any] = num_labels
__lowerCAmelCase : Union[str, Any] = num_choices
__lowerCAmelCase : Union[str, Any] = scope
    def prepare_config_and_inputs( self ) -> List[str]:
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ) -> Union[str, Any]:
        """simple docstring"""
        return MobileBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_mobilebert_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Tuple:
        """simple docstring"""
        model = MobileBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_mobilebert_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[Any]:
        """simple docstring"""
        model = MobileBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_mobilebert_for_next_sequence_prediction( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[str]:
        """simple docstring"""
        model = MobileBertForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def create_and_check_mobilebert_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[Any]:
        """simple docstring"""
        model = MobileBertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , next_sentence_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def create_and_check_mobilebert_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Any:
        """simple docstring"""
        model = MobileBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_mobilebert_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Tuple:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_mobilebert_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[Any]:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_mobilebert_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[Any]:
        """simple docstring"""
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ) -> List[str]:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ) -> List[str]:
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            # `MODEL_FOR_PRETRAINING_MAPPING` is the mapping name used upstream; it is assumed
            # to be imported at the top of this test file alongside `get_values`.
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["""next_sentence_label"""] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ) -> Optional[Any]:
        """simple docstring"""
        self.model_tester = MobileBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=37 )
    def test_config( self ) -> Optional[Any]:
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_mobilebert_model( self ) -> Optional[Any]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs )
    def test_for_masked_lm( self ) -> Tuple:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ) -> Dict:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction( self ) -> Dict:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining( self ) -> Optional[int]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs )
    def test_for_question_answering( self ) -> str:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ) -> Tuple:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ) -> Tuple:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs )
def _long_tensor(__A : Any ) -> Optional[Any]:
    # Assumes `torch_device` is imported from `transformers.testing_utils` with the other test helpers.
    return torch.tensor(
        __A , dtype=torch.long , device=torch_device , )
TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_no_head( self ) -> Union[str, Any]:
        """simple docstring"""
        model = MobileBertModel.from_pretrained("""google/mobilebert-uncased""" ).to(torch_device )
        input_ids = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 5_12) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [
                    [-2.473_6526e07, 8.269_1656e04, 1.652_1838e05],
                    [-5.754_1704e-01, 3.905_6022e00, 4.401_1507e00],
                    [2.604_7359e00, 1.567_7652e00, -1.732_4188e-01],
                ]
            ] , device=torch_device , )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, so it is not a good idea to measure closeness using addition.
        # Here, we instead divide the expected result by the actual result in order to obtain ~1. We then check that
        # the ratio is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound )
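# A minimal, self-contained sketch of the ratio-based tolerance check used above.
# Comparing absolute differences is unreliable here because the outputs span roughly
# 10e0 to 10e8, so we bound the elementwise ratio expected / actual instead. This
# helper is illustrative only and is not part of the original test file.
def _ratio_allclose(expected , actual , tol=1e-3 ):
    ratio = expected / actual
    return bool(torch.all(ratio >= 1 - tol ) and torch.all(ratio <= 1 + tol ))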
| 139 |
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
notebook_first_cells = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
black_avoid_patterns = {
    """{processor_class}""": """FakeProcessorClass""",
    """{model_class}""": """FakeModelClass""",
    """{object_class}""": """FakeObjectClass""",
}
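# Illustrative note (not part of the original config module): these placeholders are
# substituted into documentation code templates before formatting, e.g. roughly
#     for placeholder, fake in black_avoid_patterns.items():
#         template = template.replace(placeholder, fake)
# so that templated code samples remain parseable until real class names are filled in.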
| 139 | 1 |
'''simple docstring'''
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) ->Any:
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id )
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id )
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=torch_device )
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=torch_device )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=torch_device )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
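# Illustrative note (not in the original test file): the default masks built above are
# simply "1 wherever the token is not padding". For example, with pad_token_id = 1:
#     torch.tensor([[5, 7, 1, 1]]).ne(1)  ->  tensor([[True, True, False, False]])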
class MaMaaaModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="relu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) -> str:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs( self ) -> Union[str, Any]:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1 )
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1 )
        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def get_config( self ) -> Any:
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
    def prepare_config_and_inputs_for_common( self ) -> Tuple:
        config , inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs( self , config , inputs_dict ) -> Union[str, Any]:
        model = MaMaaaModel(config=config ).get_decoder().to(torch_device ).eval()
        input_ids = inputs_dict["""input_ids"""]
        attention_mask = inputs_dict["""attention_mask"""]
        head_mask = inputs_dict["""head_mask"""]
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )["""last_hidden_state"""]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[
            """last_hidden_state"""
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-2 ) )
    def check_encoder_decoder_model_standalone( self , config , inputs_dict ) -> str:
        model = MaMaaaModel(config=config ).to(torch_device ).eval()
        outputs = model(**inputs_dict )
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname )
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname ).to(torch_device )
        encoder_last_hidden_state_a = encoder(inputs_dict["""input_ids"""] , attention_mask=inputs_dict["""attention_mask"""] )[
            0
        ]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname )
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname ).to(torch_device )
        last_hidden_state_a = decoder(
            input_ids=inputs_dict["""decoder_input_ids"""] , attention_mask=inputs_dict["""decoder_attention_mask"""] , encoder_hidden_states=encoder_last_hidden_state , encoder_attention_mask=inputs_dict["""attention_mask"""] , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class MaMaaaModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'''conversational''': MaMaaaForConditionalGeneration,
'''feature-extraction''': MaMaaaModel,
'''summarization''': MaMaaaForConditionalGeneration,
'''text2text-generation''': MaMaaaForConditionalGeneration,
'''translation''': MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> List[Any]:
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True
        return False
    def setUp( self ) -> str:
        self.model_tester = MaMaaaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaMaaaConfig )
    def test_config( self ) -> List[str]:
        self.config_tester.run_common_tests()
    def test_save_load_strict( self ) -> Union[str, Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                model_a , info = model_class.from_pretrained(tmpdirname , output_loading_info=True )
            self.assertEqual(info["""missing_keys"""] , [] )
    def test_decoder_model_past_with_large_inputs( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs )
    def test_encoder_decoder_model_standalone( self ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs )
    def test_inputs_embeds( self ) -> Optional[int]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict , model_class ) )
            if not self.is_encoder_decoder:
                input_ids = inputs["""input_ids"""]
                del inputs["input_ids"]
            else:
                input_ids = inputs["""input_ids"""]
                decoder_input_ids = inputs.get("""decoder_input_ids""" , None )
                del inputs["input_ids"]
                inputs.pop("""decoder_input_ids""" , None )
            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["""inputs_embeds"""] = wte(input_ids )
            else:
                inputs["""inputs_embeds"""] = wte(input_ids )
                inputs["""decoder_inputs_embeds"""] = wte(decoder_input_ids )
            with torch.no_grad():
                model(**inputs )[0]
    def test_generate_fp16( self ) -> Tuple:
        config , input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        model = MaMaaaForConditionalGeneration(config ).eval().to(torch_device )
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids , attention_mask=attention_mask )
        model.generate(num_beams=4 , do_sample=True , early_stopping=False , num_return_sequences=3 )
def _long_tensor( __lowerCamelCase : Any ) ->Tuple:
    return torch.tensor(__lowerCamelCase , dtype=torch.long , device=torch_device )
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests ( unittest.TestCase ):
    '''simple docstring'''
    @cached_property
    def default_tokenizer( self ) -> Any:
        return MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" )
    def test_inference_no_head( self ) -> Union[str, Any]:
        model = MaMaaaModel.from_pretrained("""facebook/m2m100_418M""" ).to(torch_device )
        input_ids = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]] )
        decoder_input_ids = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]] )
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config , input_ids , decoder_input_ids )
        with torch.no_grad():
            output = model(**inputs_dict )[0]
        expected_shape = torch.Size((1, 11, 1024) )
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=torch_device )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_inference_head( self ) -> int:
        model = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(torch_device )
        # change to intended input
        input_ids = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]] )
        decoder_input_ids = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]] )
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config , input_ids , decoder_input_ids )
        with torch.no_grad():
            output = model(**inputs_dict )[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=torch_device )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_seq_to_seq_generation( self ) -> List[Any]:
        model = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(torch_device )
        tokenizer = MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" , src_lang="""fr""" , tgt_lang="""en""" )
        src_fr = [
            """L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
            """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
            """Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"""
            """ Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"""
            """ l'ampleur de la surveillance américaine sur l'ensemble des communications en France.""",
        ]
        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr , padding=True , return_tensors="""pt""" )
        hypotheses_batch = model.generate(
            input_ids=dct["""input_ids"""].to(torch_device ) , attention_mask=dct["""attention_mask"""].to(torch_device ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("""en""" ) , )
        expected_en = [
            """The NSA case highlights the total absence of intelligence debate""",
            """I think there are two levels of response from the French government.""",
            """When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."""
            """ Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"""
            """ communications in France.""",
        ]
        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist() , clean_up_tokenization_spaces=True , skip_special_tokens=True )
        assert generated == expected_en
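# Distilled usage pattern from the test above (a sketch, assuming the same
# "facebook/m2m100_418M" checkpoint and a French source sentence):
#     tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
#     batch = tokenizer(["L'affaire NSA souligne ..."], return_tensors="pt")
#     generated = model.generate(**batch, forced_bos_token_id=tokenizer.get_lang_id("en"))
#     print(tokenizer.batch_decode(generated, skip_special_tokens=True))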
| 58 |
'''simple docstring'''
from collections.abc import Sequence
def max_subarray_sum( arr : Sequence[float] , allow_empty_subarrays : bool = False ) ->float:
    # Kadane's algorithm: maximum sum over contiguous subarrays in O(n) time.
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("""-inf""" )
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray or restart it; an empty restart (sum 0)
        # is only allowed when empty subarrays are permitted.
        curr_sum = max(0 if allow_empty_subarrays else num , curr_sum + num )
        max_sum = max(max_sum , curr_sum )
    return max_sum
if __name__ == "__main__":
    from doctest import testmod
    testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"""{max_subarray_sum(nums) = }""")
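    # Optional sanity check (a sketch, not part of the original snippet): Kadane's
    # O(n) answer should match a brute-force O(n^2) scan over all non-empty subarrays.
    brute_force = max(
        sum(nums[i:j] ) for i in range(len(nums ) ) for j in range(i + 1 , len(nums ) + 1 ) )
    assert max_subarray_sum(nums ) == brute_force == 6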
| 58 | 1 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = """RegNetConfig"""
# Base docstring
_CHECKPOINT_FOR_DOC = """facebook/regnet-y-040"""
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = """facebook/regnet-y-040"""
_IMAGE_CLASS_EXPECTED_OUTPUT = """tabby, tabby cat"""
REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    """facebook/regnet-y-040""",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer ( nn.Module ):
    def __init__( self , in_channels : int , out_channels : int , kernel_size : int = 3 , stride : int = 1 , groups : int = 1 , activation : Optional[str] = "relu" , ):
        '''simple docstring'''
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels , out_channels , kernel_size=kernel_size , stride=stride , padding=kernel_size // 2 , groups=groups , bias=False , )
        self.normalization = nn.BatchNorm2d(out_channels )
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()
    def forward( self , hidden_state ):
        '''simple docstring'''
        hidden_state = self.convolution(hidden_state )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state
class RegNetEmbeddings ( nn.Module ):
    def __init__( self , config : RegNetConfig ):
        '''simple docstring'''
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
        self.num_channels = config.num_channels
    def forward( self , pixel_values ):
        '''simple docstring'''
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
        hidden_state = self.embedder(pixel_values )
        return hidden_state
class RegNetShortCut ( nn.Module ):
    def __init__( self , in_channels : int , out_channels : int , stride : int = 2 ):
        '''simple docstring'''
        super().__init__()
        self.convolution = nn.Conv2d(in_channels , out_channels , kernel_size=1 , stride=stride , bias=False )
        self.normalization = nn.BatchNorm2d(out_channels )
    def forward( self , input : Tensor ):
        '''simple docstring'''
        hidden_state = self.convolution(input )
        hidden_state = self.normalization(hidden_state )
        return hidden_state
class RegNetSELayer ( nn.Module ):
    def __init__( self , in_channels : int , reduced_channels : int ):
        '''simple docstring'''
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1) )
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels , reduced_channels , kernel_size=1 ) , nn.ReLU() , nn.Conv2d(reduced_channels , in_channels , kernel_size=1 ) , nn.Sigmoid() , )
    def forward( self , hidden_state ):
        '''simple docstring'''
        # squeeze to a per-channel descriptor, excite, then rescale the input channels
        pooled = self.pooler(hidden_state )
        attention = self.attention(pooled )
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer ( nn.Module ):
    def __init__( self , config : RegNetConfig , in_channels : int , out_channels : int , stride : int = 1 ):
        '''simple docstring'''
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            RegNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels , out_channels , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(out_channels , out_channels , stride=stride , groups=groups , activation=config.hidden_act ) , RegNetConvLayer(out_channels , out_channels , kernel_size=1 , activation=None ) , )
        self.activation = ACT2FN[config.hidden_act]
    def forward( self , hidden_state ):
        '''simple docstring'''
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class RegNetYLayer ( nn.Module ):
    def __init__( self , config : RegNetConfig , in_channels : int , out_channels : int , stride : int = 1 ):
        '''simple docstring'''
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            RegNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels , out_channels , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(out_channels , out_channels , stride=stride , groups=groups , activation=config.hidden_act ) , RegNetSELayer(out_channels , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(out_channels , out_channels , kernel_size=1 , activation=None ) , )
        self.activation = ACT2FN[config.hidden_act]
    def forward( self , hidden_state ):
        '''simple docstring'''
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class RegNetStage ( nn.Module ):
    def __init__( self , config : RegNetConfig , in_channels : int , out_channels : int , stride : int = 2 , depth : int = 2 , ):
        '''simple docstring'''
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(
                config , in_channels , out_channels , stride=stride , ) , *[layer(config , out_channels , out_channels ) for _ in range(depth - 1 )] , )
    def forward( self , hidden_state ):
        '''simple docstring'''
        hidden_state = self.layers(hidden_state )
        return hidden_state
class RegNetEncoder ( nn.Module ):
    def __init__( self , config : RegNetConfig ):
        '''simple docstring'''
        super().__init__()
        self.stages = nn.ModuleList([] )
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for (in_channels, out_channels), depth in zip(in_out_channels , config.depths[1:] ):
            self.stages.append(RegNetStage(config , in_channels , out_channels , depth=depth ) )
    def forward( self , hidden_state : Tensor , output_hidden_states : bool = False , return_dict : bool = True ):
        '''simple docstring'''
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state )
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state , hidden_states=hidden_states )
class RegNetPreTrainedModel ( PreTrainedModel ):
    config_class = RegNetConfig
    base_model_prefix = 'regnet'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True
    def _init_weights( self , module ):
        '''simple docstring'''
        if isinstance(module , nn.Conv2d ):
            nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" )
        elif isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
            nn.init.constant_(module.weight , 1 )
            nn.init.constant_(module.bias , 0 )
    def _set_gradient_checkpointing( self , module , value=False ):
        '''simple docstring'''
        if isinstance(module , RegNetModel ):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
REGNET_INPUTS_DOCSTRING = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    'The bare RegNet model outputting raw features without any specific head on top.' , REGNET_START_DOCSTRING , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel ( RegNetPreTrainedModel ):
    def __init__( self , config ):
        '''simple docstring'''
        super().__init__(config )
        self.config = config
        self.embedder = RegNetEmbeddings(config )
        self.encoder = RegNetEncoder(config )
        self.pooler = nn.AdaptiveAvgPool2d((1, 1) )
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward( self , pixel_values : Tensor , output_hidden_states : Optional[bool] = None , return_dict : Optional[bool] = None ):
        '''simple docstring'''
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values )
        encoder_outputs = self.encoder(
            embedding_output , output_hidden_states=output_hidden_states , return_dict=return_dict )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
    '\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ' , REGNET_START_DOCSTRING , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification ( RegNetPreTrainedModel ):
    def __init__( self , config ):
        '''simple docstring'''
        super().__init__(config )
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config )
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values : Optional[torch.FloatTensor] = None , labels : Optional[torch.LongTensor] = None , output_hidden_states : Optional[bool] = None , return_dict : Optional[bool] = None , ):
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
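# A minimal smoke-test sketch (not from the original file); the config values below
# are illustrative assumptions, not library defaults:
#     config = RegNetConfig(num_labels=10, hidden_sizes=[8, 16], depths=[1, 1])
#     model = RegNetForImageClassification(config).eval()
#     with torch.no_grad():
#         logits = model(torch.randn(1, 3, 32, 32)).logits  # shape: (1, 10)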
| 201 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments :
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    freeze_encoder: bool = field(default=False , metadata={'help': 'Whether to freeze the encoder.'} )
    freeze_embeds: bool = field(default=False , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class DataTrainingArguments :
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
    task: Optional[str] = field(
        default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
    max_source_length: Optional[int] = field(
        default=1_024 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    max_target_length: Optional[int] = field(
        default=128 , metadata={
            'help': (
                'The maximum total sequence length for target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    val_max_target_length: Optional[int] = field(
        default=142 , metadata={
            'help': (
                'The maximum total sequence length for validation target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded. '
                'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
                'during ``evaluate`` and ``predict``.'
            )
        } , )
    test_max_target_length: Optional[int] = field(
        default=142 , metadata={
            'help': (
                'The maximum total sequence length for test target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    n_train: Optional[int] = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
    n_val: Optional[int] = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
    n_test: Optional[int] = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
    src_lang: Optional[str] = field(default=None , metadata={'help': 'Source language id for translation.'} )
    tgt_lang: Optional[str] = field(default=None , metadata={'help': 'Target language id for translation.'} )
    eval_beams: Optional[int] = field(default=None , metadata={'help': '# num_beams to use for evaluation.'} )
    ignore_pad_token_for_loss: bool = field(
        default=True , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def handle_metrics( split , metrics , output_dir ):
    logger.info(f'''***** {split} metrics *****''' )
    for key in sorted(metrics.keys() ):
        logger.info(f''' {key} = {metrics[key]}''' )
    save_json(metrics , os.path.join(output_dir , f'''{split}_results.json''' ) )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , __magic_name__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args , p , None ):
            assert hasattr(config , p ), f'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
            setattr(config , p , getattr(training_args , p ) )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(__magic_name__ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
lowercase__ = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(__magic_name__ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(__magic_name__ , __magic_name__ ):
lowercase__ = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
lowercase__ = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(__magic_name__ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
lowercase__ = SeqaSeqDataset
# Get datasets
lowercase__ = (
dataset_class(
__magic_name__ , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
lowercase__ = (
dataset_class(
__magic_name__ , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
lowercase__ = (
dataset_class(
__magic_name__ , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
lowercase__ = (
build_compute_metrics_fn(data_args.task , __magic_name__ ) if training_args.predict_with_generate else None
)
lowercase__ = SeqaSeqTrainer(
model=__magic_name__ , args=__magic_name__ , data_args=__magic_name__ , train_dataset=__magic_name__ , eval_dataset=__magic_name__ , data_collator=SeqaSeqDataCollator(
__magic_name__ , __magic_name__ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__magic_name__ , tokenizer=__magic_name__ , )
lowercase__ = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
lowercase__ = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
lowercase__ = train_result.metrics
lowercase__ = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , __magic_name__ , training_args.output_dir )
all_metrics.update(__magic_name__ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
lowercase__ = trainer.evaluate(metric_key_prefix="val" )
lowercase__ = data_args.n_val
lowercase__ = round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , __magic_name__ , training_args.output_dir )
all_metrics.update(__magic_name__ )
if training_args.do_predict:
logger.info("*** Predict ***" )
lowercase__ = trainer.predict(test_dataset=__magic_name__ , metric_key_prefix="test" )
lowercase__ = test_output.metrics
lowercase__ = data_args.n_test
if trainer.is_world_process_zero():
lowercase__ = round(metrics["test_loss"] , 4 )
handle_metrics("test" , __magic_name__ , training_args.output_dir )
all_metrics.update(__magic_name__ )
if training_args.predict_with_generate:
lowercase__ = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )
lowercase__ = lmap(str.strip , __magic_name__ )
write_txt_file(__magic_name__ , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(__magic_name__ , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def _mp_fn( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
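# Illustrative invocation (a sketch; the file name, paths and hyper-parameters below
# are placeholders, not taken from the original script):
#     python finetune_trainer.py \
#         --model_name_or_path t5-small --data_dir ./wmt_en_ro --output_dir ./out \
#         --do_train --do_eval --predict_with_generate --task translation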
| 201 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'realm'
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , retriever_proj_size=1_28 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=30_72 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , span_hidden_size=2_56 , max_span_width=10 , reader_layer_norm_eps=1e-3 , reader_beam_size=5 , reader_seq_len=3_20 , num_block_records=13_35_37_18 , searcher_beam_size=50_00 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ) -> Tuple:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
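# Minimal instantiation sketch (illustrative, not part of the original module):
#     config = RealmConfig()                    # all defaults, e.g. vocab_size=30522
#     small = RealmConfig(num_hidden_layers=2)  # any field can be overridden by keyword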
| 134 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components( self ) -> Optional[int]:
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = PNDMScheduler(skip_prk_steps=True )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
A__ : int =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
A__ : int =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
A__ : Union[str, Any] =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
A__ : Optional[Any] ="""stabilityai/stable-diffusion-2-inpainting"""
A__ : int =StableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase_ , safety_checker=lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
A__ : Dict ="""Face of a yellow cat, high resolution, sitting on a park bench"""
A__ : str =torch.manual_seed(0 )
A__ : Dict =pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , generator=lowerCAmelCase_ , output_type="""np""" , )
A__ : Tuple =output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def lowercase__ ( self : str ) -> int:
'''simple docstring'''
A__ : int =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
A__ : List[Any] =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
A__ : List[Any] =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
A__ : int ="""stabilityai/stable-diffusion-2-inpainting"""
A__ : List[Any] =StableDiffusionInpaintPipeline.from_pretrained(
lowerCAmelCase_ , torch_dtype=torch.floataa , safety_checker=lowerCAmelCase_ , )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
A__ : Union[str, Any] ="""Face of a yellow cat, high resolution, sitting on a park bench"""
A__ : Union[str, Any] =torch.manual_seed(0 )
A__ : Dict =pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , generator=lowerCAmelCase_ , output_type="""np""" , )
A__ : str =output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def lowercase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A__ : Union[str, Any] =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
A__ : Optional[Any] =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
A__ : List[str] ="""stabilityai/stable-diffusion-2-inpainting"""
A__ : Any =PNDMScheduler.from_pretrained(lowerCAmelCase_ , subfolder="""scheduler""" )
A__ : Optional[int] =StableDiffusionInpaintPipeline.from_pretrained(
lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
A__ : Dict ="""Face of a yellow cat, high resolution, sitting on a park bench"""
A__ : Any =torch.manual_seed(0 )
A__ : Tuple =pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=2 , output_type="""np""" , )
A__ : Dict =torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
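
# A minimal standalone sketch of the inpainting pipeline exercised by the slow tests
# above (illustrative, not part of the test suite). It assumes a CUDA device and
# network access to the same checkpoint and images the tests download.
if __name__ == "__main__":
    init_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/init_image.png"
    )
    mask_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
    )
    pipe = StableDiffusionInpaintPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16, safety_checker=None
    ).to("cuda")
    result = pipe(
        prompt="Face of a yellow cat, high resolution, sitting on a park bench",
        image=init_image,
        mask_image=mask_image,
        generator=torch.manual_seed(0),
    )
    result.images[0].save("inpainted.png")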
| 134 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class UpperCAmelCase_ ( unittest.TestCase ):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def snake_case_ ( self : Optional[int] ):
_UpperCAmelCase : Any = self.get_tokenizer()
_UpperCAmelCase : Tuple = self.get_feature_extractor()
_UpperCAmelCase : Optional[int] = ClapProcessor(tokenizer=A , feature_extractor=A )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : List[Any] = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A )
def snake_case_ ( self : List[Any] ):
_UpperCAmelCase : Optional[int] = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : int = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_UpperCAmelCase : str = self.get_feature_extractor(do_normalize=A , padding_value=1.0 )
_UpperCAmelCase : Tuple = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A )
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : Tuple = self.get_feature_extractor()
_UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
_UpperCAmelCase : str = ClapProcessor(tokenizer=A , feature_extractor=A )
_UpperCAmelCase : Any = floats_list((3, 1_0_0_0) )
_UpperCAmelCase : List[Any] = feature_extractor(A , return_tensors="np" )
_UpperCAmelCase : Optional[int] = processor(audios=A , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case_ ( self : List[Any] ):
_UpperCAmelCase : List[Any] = self.get_feature_extractor()
_UpperCAmelCase : Tuple = self.get_tokenizer()
_UpperCAmelCase : Dict = ClapProcessor(tokenizer=A , feature_extractor=A )
_UpperCAmelCase : Union[str, Any] = "This is a test string"
_UpperCAmelCase : Union[str, Any] = processor(text=A )
_UpperCAmelCase : int = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case_ ( self : Tuple ):
_UpperCAmelCase : Any = self.get_feature_extractor()
_UpperCAmelCase : List[Any] = self.get_tokenizer()
_UpperCAmelCase : Dict = ClapProcessor(tokenizer=A , feature_extractor=A )
_UpperCAmelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase : int = processor.batch_decode(A )
_UpperCAmelCase : Optional[Any] = tokenizer.batch_decode(A )
self.assertListEqual(A , A )
def snake_case_ ( self : Any ):
_UpperCAmelCase : Optional[Any] = self.get_feature_extractor()
_UpperCAmelCase : Any = self.get_tokenizer()
_UpperCAmelCase : Any = ClapProcessor(tokenizer=A , feature_extractor=A )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
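
# A short usage sketch of the processor under test (illustrative, not part of the
# suite). It assumes network access to the "laion/clap-htsat-unfused" checkpoint used
# above; the random array stands in for one second of real 48 kHz mono audio.
if __name__ == "__main__":
    import numpy as np
    from transformers import ClapProcessor

    processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
    audio = np.random.randn(48_000).astype("float32")
    inputs = processor(
        text="a dog barking", audios=audio, sampling_rate=48_000, return_tensors="pt", padding=True
    )
    print({name: tuple(tensor.shape) for name, tensor in inputs.items()})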
| 202 |
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN, image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD, **kwargs, ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def snake_case_ ( self : Optional[int] , A : np.ndarray , A : Dict[str, int] , A : PILImageResampling = PILImageResampling.BICUBIC , A : Optional[Union[str, ChannelDimension]] = None , **A : Union[str, Any] , ):
_UpperCAmelCase : Tuple = get_size_dict(A , default_to_square=A )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
_UpperCAmelCase : Any = int((2_5_6 / 2_2_4) * size["shortest_edge"] )
_UpperCAmelCase : Union[str, Any] = get_resize_output_image_size(A , size=A , default_to_square=A )
_UpperCAmelCase : str = {"height": output_size[0], "width": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f'Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}' )
return resize(
A , size=(size_dict["height"], size_dict["width"]) , resample=A , data_format=A , **A )
def snake_case_ ( self : int , A : np.ndarray , A : Dict[str, int] , A : Optional[Union[str, ChannelDimension]] = None , **A : Optional[Any] , ):
_UpperCAmelCase : Dict = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f'Size dict must have keys \'height\' and \'width\'. Got {size.keys()}' )
return center_crop(A , size=(size["height"], size["width"]) , data_format=A , **A )
def snake_case_ ( self : List[str] , A : np.ndarray , A : Union[int, float] , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict , ):
return rescale(A , scale=A , data_format=A , **A )
def snake_case_ ( self : List[str] , A : np.ndarray , A : Union[float, List[float]] , A : Union[float, List[float]] , A : Optional[Union[str, ChannelDimension]] = None , **A : str , ):
return normalize(A , mean=A , std=A , data_format=A , **A )
def snake_case_ ( self : Tuple , A : ImageInput , A : Optional[bool] = None , A : Optional[Dict[str, int]] = None , A : PILImageResampling = None , A : Optional[bool] = None , A : Optional[Dict[str, int]] = None , A : Optional[bool] = None , A : Optional[float] = None , A : Optional[bool] = None , A : Optional[Union[float, Iterable[float]]] = None , A : Optional[Union[float, Iterable[float]]] = None , A : Optional[TensorType] = None , A : ChannelDimension = ChannelDimension.FIRST , **A : Dict , ):
_UpperCAmelCase : Dict = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase : List[str] = resample if resample is not None else self.resample
_UpperCAmelCase : int = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase : str = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase : List[str] = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase : Tuple = image_std if image_std is not None else self.image_std
_UpperCAmelCase : Any = size if size is not None else self.size
_UpperCAmelCase : Optional[int] = get_size_dict(A , default_to_square=A )
_UpperCAmelCase : List[str] = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase : List[str] = get_size_dict(A , param_name="crop_size" )
_UpperCAmelCase : List[Any] = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
_UpperCAmelCase : Any = [to_numpy_array(A ) for image in images]
if do_resize:
_UpperCAmelCase : str = [self.resize(A , A , A ) for image in images]
if do_center_crop:
_UpperCAmelCase : Dict = [self.center_crop(A , A ) for image in images]
if do_rescale:
_UpperCAmelCase : Dict = [self.rescale(A , A ) for image in images]
if do_normalize:
_UpperCAmelCase : Optional[int] = [self.normalize(A , A , A ) for image in images]
_UpperCAmelCase : Union[str, Any] = [to_channel_dimension_format(A , A ) for image in images]
_UpperCAmelCase : List[Any] = {"pixel_values": images}
return BatchFeature(data=A , tensor_type=A )
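
# A minimal sketch of running the processor above on a single image (illustrative):
# the class is instantiated with its defaults rather than loaded from a checkpoint,
# the random array stands in for a real photo, and it assumes the remaining helper
# signatures in this file are de-obfuscated in the same way as __init__.
if __name__ == "__main__":
    dummy_image = (np.random.rand(3, 300, 400) * 255).astype(np.uint8)
    image_processor = LevitImageProcessor()
    batch = image_processor(dummy_image, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)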
| 202 | 1 |
'''simple docstring'''
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    """Strand sort: repeatedly strip an increasing "strand" off the input and merge
    it into the growing solution list.

    >>> strand_sort([4, 3, 5, 1, 2])
    [1, 2, 3, 4, 5]
    """
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
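    # Extra edge cases (illustrative): empty input and already-sorted input.
    assert strand_sort([]) == []
    assert strand_sort([1, 2, 3]) == [1, 2, 3]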
| 35 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = 42
def __init__( self : int , UpperCAmelCase : str , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : str , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : List[str]=False , UpperCAmelCase : bool = False , ) -> List[str]:
lowerCamelCase__ : int = hans_processors[task]()
lowerCamelCase__ : Optional[Any] = os.path.join(
UpperCAmelCase , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(UpperCAmelCase ) , UpperCAmelCase , ) , )
lowerCamelCase__ : int = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = label_list[2], label_list[1]
lowerCamelCase__ : List[str] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase__ : str = cached_features_file + '.lock'
with FileLock(UpperCAmelCase ):
if os.path.exists(UpperCAmelCase ) and not overwrite_cache:
logger.info(F"""Loading features from cached file {cached_features_file}""" )
lowerCamelCase__ : int = torch.load(UpperCAmelCase )
else:
logger.info(F"""Creating features from dataset file at {data_dir}""" )
lowerCamelCase__ : str = (
processor.get_dev_examples(UpperCAmelCase ) if evaluate else processor.get_train_examples(UpperCAmelCase )
)
logger.info('Training examples: %s' , len(UpperCAmelCase ) )
lowerCamelCase__ : Dict = hans_convert_examples_to_features(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
logger.info('Saving features into cached file %s' , UpperCAmelCase )
torch.save(self.features , UpperCAmelCase )
def __len__( self : Optional[int] ) -> Optional[Any]:
return len(self.features )
def __getitem__( self : Tuple , UpperCAmelCase : Dict ) -> InputFeatures:
return self.features[i]
def A_ ( self : int ) -> int:
return self.label_list
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase :
UpperCAmelCase__ = 42
def __init__( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : str , UpperCAmelCase : Optional[int] = 128 , UpperCAmelCase : Any=False , UpperCAmelCase : bool = False , ) -> Union[str, Any]:
lowerCamelCase__ : Any = hans_processors[task]()
lowerCamelCase__ : Optional[Any] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase__ , lowerCamelCase__ : str = label_list[2], label_list[1]
lowerCamelCase__ : Optional[int] = label_list
lowerCamelCase__ : int = processor.get_dev_examples(UpperCAmelCase ) if evaluate else processor.get_train_examples(UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = hans_convert_examples_to_features(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(UpperCAmelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
lowerCamelCase__ : Optional[int] = tf.data.Dataset.from_generator(
UpperCAmelCase , (
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) , (
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def A_ ( self : Any ) -> Any:
return self.dataset
def __len__( self : Tuple ) -> int:
return len(self.features )
def __getitem__( self : List[str] , UpperCAmelCase : Any ) -> InputFeatures:
return self.features[i]
def A_ ( self : Dict ) -> str:
return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(examples, label_list, max_length, tokenizer):
    """Converts a list of InputExamples to InputFeatures with the given tokenizer."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a, example.text_b, add_special_tokens=True, max_length=max_length, padding="max_length", truncation=True, return_overflowing_tokens=True, )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
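
# A small end-to-end sketch of the pieces above (illustrative). It assumes a local
# "hans/" directory containing heuristics_train_set.txt and network access for the
# tokenizer download; the slice keeps the run cheap.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    processor = HansProcessor()
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    examples = processor.get_train_examples("hans")[:8]
    features = hans_convert_examples_to_features(examples, processor.get_labels(), 128, tokenizer)
    print(len(features), features[0].input_ids[:10])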
| 50 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})


@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Log metrics for one split and save them as {split}_results.json."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
'''simple docstring'''
lowerCAmelCase_ :Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Any = parser.parse_args_into_dataclasses()
check_output_dir(lowercase__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , lowercase__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowerCAmelCase_ :Union[str, Any] = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(lowercase__ , lowercase__ , lowercase__ ):
assert hasattr(lowercase__ , lowercase__ ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(lowercase__ , lowercase__ , getattr(lowercase__ , lowercase__ ) )
lowerCAmelCase_ :Tuple = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowerCAmelCase_ :Tuple = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=lowercase__ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(lowercase__ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
lowerCAmelCase_ :Dict = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(lowercase__ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(lowercase__ , lowercase__ ):
lowerCAmelCase_ :List[str] = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
lowerCAmelCase_ :List[str] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(lowercase__ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
lowerCAmelCase_ :Dict = SeqaSeqDataset
# Get datasets
lowerCAmelCase_ :Tuple = (
dataset_class(
lowercase__ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_train
else None
)
lowerCAmelCase_ :List[str] = (
dataset_class(
lowercase__ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
lowerCAmelCase_ :Any = (
dataset_class(
lowercase__ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
lowerCAmelCase_ :Any = (
build_compute_metrics_fn(data_args.task , lowercase__ ) if training_args.predict_with_generate else None
)
lowerCAmelCase_ :Tuple = SeqaSeqTrainer(
model=lowercase__ , args=lowercase__ , data_args=lowercase__ , train_dataset=lowercase__ , eval_dataset=lowercase__ , data_collator=SeqaSeqDataCollator(
lowercase__ , lowercase__ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=lowercase__ , tokenizer=lowercase__ , )
lowerCAmelCase_ :Union[str, Any] = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
lowerCAmelCase_ :Any = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
lowerCAmelCase_ :Any = train_result.metrics
lowerCAmelCase_ :Union[str, Any] = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("""train""" , lowercase__ , training_args.output_dir )
all_metrics.update(lowercase__ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowerCAmelCase_ :Any = trainer.evaluate(metric_key_prefix="""val""" )
lowerCAmelCase_ :List[Any] = data_args.n_val
lowerCAmelCase_ :Tuple = round(metrics["""val_loss"""] , 4 )
if trainer.is_world_process_zero():
handle_metrics("""val""" , lowercase__ , training_args.output_dir )
all_metrics.update(lowercase__ )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
lowerCAmelCase_ :Optional[int] = trainer.predict(test_dataset=lowercase__ , metric_key_prefix="""test""" )
lowerCAmelCase_ :Union[str, Any] = test_output.metrics
lowerCAmelCase_ :Dict = data_args.n_test
if trainer.is_world_process_zero():
lowerCAmelCase_ :List[str] = round(metrics["""test_loss"""] , 4 )
handle_metrics("""test""" , lowercase__ , training_args.output_dir )
all_metrics.update(lowercase__ )
if training_args.predict_with_generate:
lowerCAmelCase_ :Union[str, Any] = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=lowercase__ , clean_up_tokenization_spaces=lowercase__ )
lowerCAmelCase_ :str = lmap(str.strip , lowercase__ )
write_txt_file(lowercase__ , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
if trainer.is_world_process_zero():
save_json(lowercase__ , os.path.join(training_args.output_dir , """all_results.json""" ) )
return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
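
# Example invocation (illustrative; the file name, data directory and hyper-parameters
# below are placeholders, not values taken from this script):
#
#   python finetune_trainer.py \
#       --model_name_or_path sshleifer/student_marian_en_ro_6_1 \
#       --data_dir ./wmt_en_ro \
#       --output_dir ./marian_en_ro \
#       --task translation \
#       --do_train --do_eval --predict_with_generate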
| 1 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
@register_to_config
    def __init__( self, prefix_length: int, prefix_inner_dim: int, prefix_hidden_dim: Optional[int] = None, vocab_size: int = 50_257, n_positions: int = 1_024, n_embd: int = 768, n_layer: int = 12, n_head: int = 12, n_inner: Optional[int] = None, activation_function: str = "gelu_new", resid_pdrop: float = 0.1, embd_pdrop: float = 0.1, attn_pdrop: float = 0.1, layer_norm_epsilon: float = 1e-5, initializer_range: float = 0.02, scale_attn_weights: bool = True, use_cache: bool = True, scale_attn_by_inverse_layer_idx: bool = False, reorder_and_upcast_attn: bool = False, ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal." )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPTaConfig(
            vocab_size=vocab_size, n_positions=n_positions, n_embd=n_embd, n_layer=n_layer, n_head=n_head, n_inner=n_inner, activation_function=activation_function, resid_pdrop=resid_pdrop, embd_pdrop=embd_pdrop, attn_pdrop=attn_pdrop, layer_norm_epsilon=layer_norm_epsilon, initializer_range=initializer_range, scale_attn_weights=scale_attn_weights, use_cache=use_cache, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, )
        self.transformer = GPTaLMHeadModel(gpt_config)
    def forward( self, input_ids: torch.Tensor, prefix_embeds: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)
@torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        """Generate captions from a batch of prefix features, one beam search per feature."""
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
@torch.no_grad()
    def generate_beam( self, input_embeds=None, device=None, input_ids=None, beam_size: int = 5, entry_length: int = 67, temperature: float = 1.0, eos_token_id: Optional[int] = None, ):
        """Beam search over the GPT-2 decoder, seeded either by token ids or by prefix embeddings."""
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
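
# A shape-level smoke test of the decoder above (illustrative): the prefix length and
# the GPT-2-sized dimensions are arbitrary choices for the sketch, not canonical values.
if __name__ == "__main__":
    decoder = UniDiffuserTextDecoder(prefix_length=77, prefix_inner_dim=768)
    input_ids = torch.randint(0, 50_257, (2, 16))
    prefix_embeds = torch.randn(2, 77, 768)
    output = decoder(input_ids, prefix_embeds)
    print(output.logits.shape)  # expected: torch.Size([2, 93, 50257]) -- 77 prefix + 16 text tokens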
| 1 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json'''
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__( self, vocab_size=32_000, hidden_size=768, num_hidden_layers=12, intermediate_size=3_072, hidden_act="gelu_new", hidden_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=4, initializer_range=0.02, layer_norm_eps=1e-12, use_tpu_fourier_optimizations=False, tpu_short_seq_length=512, pad_token_id=3, bos_token_id=1, eos_token_id=2, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
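
# Quick sanity check of the restored defaults (illustrative):
if __name__ == "__main__":
    config = FNetConfig()
    print(config.model_type, config.hidden_size, config.use_tpu_fourier_optimizations)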
| 300 |
def twos_complement(number: int) -> str:
    """Return the two's complement binary representation of a negative integer.

    >>> twos_complement(0)
    '0b0'
    >>> twos_complement(-1)
    '0b11'
    >>> twos_complement(-5)
    '0b1011'
    """
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
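    # Cross-check against the arithmetic definition (illustrative): for n < 0 and
    # bit-width w, the two's complement value equals (1 << w) + n.
    for n in range(-10, 0):
        width = len(bin(n)[3:]) + 1
        assert int(twos_complement(n), 2) == (1 << width) + n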
| 280 | 0 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
a_ = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
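
# Migration sketch (illustrative): new code should construct the replacement class
# directly, e.g.
#
#   from transformers import DeiTImageProcessor
#   image_processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")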
| 50 |
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Difference between predicted and actual output for the given example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Evaluate h(x) = theta_0 + theta_1*x_1 + theta_2*x_2 + theta_3*x_3."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum of error (weighted by feature `index` unless index == -1) over `end` examples."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector, temp_parameter_vector, atol=absolute_error_limit, rtol=relative_error_limit, ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
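    # The learned parameters approximate theta in h(x) = theta_0 + theta_1*x_1 + theta_2*x_2
    # + theta_3*x_3; printing them makes the fit inspectable (illustrative addition):
    print(("Learned parameter vector:", parameter_vector))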
| 50 | 1 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
@slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 65 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]  # token ids fit in 16 bits
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
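# --- Added example (not part of the original script): a minimal sketch showing how the
# --- pickled dump produced above can be read back. The file name below is what the
# --- default arguments would produce and is illustrative only.
#
# import pickle
#
# with open("data/dump.bert-base-uncased.pickle", "rb") as handle:
#     sequences = pickle.load(handle)
# print(len(sequences))     # number of encoded examples
# print(sequences[0][:10])  # first ten token ids of the first example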
| 65 | 1 |
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 150 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
ACCEPTABLE_CHECKPOINTS = [
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 150 | 1 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution"""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_2d_tensor(tensor):
    """Print a 2D tensor"""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """This method shows how to compute:
    - head attention entropy
    - head importance scores according to http://arxiv.org/abs/1905.10650
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """This method shows how to mask head (set some heads to zero), to test the effect on the network,
    based on the head importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650)
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """This method shows how to prune head (remove heads weights) based on
    the head importance scores as described in Michel et al. (http://arxiv.org/abs/1905.10650)
    """
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
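# --- Added example (not part of the original script): the `entropy` helper on a toy
# --- attention distribution. A uniform distribution over n positions has entropy log(n).
#
# probs = torch.full((1, 4), 0.25)  # uniform attention over 4 positions
# print(entropy(probs))             # tensor([1.3863]), i.e. log(4)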
| 245 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.23_23, -0.13_04, 0.08_13, -0.30_93, -0.09_19, -0.15_71, -0.11_25, -0.58_06]],
[1_7, 0.55, [-0.08_31, -0.24_43, 0.09_01, -0.09_19, 0.33_96, 0.01_03, -0.37_43, 0.07_01]],
[8, 0.89, [-0.48_63, 0.08_59, 0.08_75, -0.16_58, 0.91_99, -0.01_14, 0.48_39, 0.46_39]],
[3, 1_0_0_0, [-0.56_49, 0.24_02, -0.55_18, 0.12_48, 1.13_28, -0.24_43, -0.03_25, -1.00_78]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.15_14, 0.08_07, 0.16_24, 0.10_16, -0.18_96, 0.02_63, 0.06_77, 0.23_10]],
[1_7, 0.55, [0.11_64, -0.02_16, 0.01_70, 0.15_89, -0.31_20, 0.10_05, -0.05_81, -0.14_58]],
[8, 0.89, [-0.17_58, -0.01_69, 0.10_04, -0.14_11, 0.13_12, 0.11_03, -0.19_96, 0.21_39]],
[3, 1_0_0_0, [0.12_14, 0.03_52, -0.07_31, -0.15_62, -0.09_94, -0.09_06, -0.23_40, -0.05_39]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 245 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 356 |
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
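# --- Added example (not part of the original script): what `tokenize` returns for a single
# --- dataset row. It assumes `tokenizer` has been loaded as above; the sample text is
# --- illustrative only.
#
# example = {"content": "def add(a, b):\n    return a + b\n"}
# out = tokenize(example)
# print(len(out["input_ids"]), out["ratio_char_token"])  # token count, characters per token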
| 30 | 0 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)


class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()

        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
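# --- Added usage sketch (not part of the original file). PABEE stops forwarding through
# --- further layers once `patience` consecutive internal classifiers agree. The checkpoint
# --- name and the prepared `input_ids` below are illustrative assumptions.
#
# model = BertForSequenceClassificationWithPabee.from_pretrained("bert-base-uncased", num_labels=2)
# model.eval()
# model.bert.set_patience(3)   # exit after 3 consecutive agreeing layer predictions
# model.bert.reset_stats()
# with torch.no_grad():
#     logits = model(input_ids=input_ids)[0]
# model.bert.log_stats()       # reports the average number of layers actually executed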
| 39 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return a sorted copy of the word's letters, which all of its anagrams share."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word in the word list with the same signature as my_word."""
    return word_by_signature[signature(my_word)]
data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('''anagrams.txt''', '''w''') as file:
file.write('''all_anagrams = \n ''')
file.write(pprint.pformat(all_anagrams))
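# --- Added example (not part of the original script): anagrams share a signature, e.g.
# --- signature("listen") == signature("silent") == "eilnst", so anagram("listen") returns
# --- every word in words.txt with that signature (the exact output depends on the word list).
#
# print(anagram("listen"))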
| 39 | 1 |
"""simple docstring"""
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class UpperCamelCase__( __A , __A ):
lowerCAmelCase__ : Any = 1
@register_to_config
def __init__( self ,__UpperCAmelCase = 10_00 ,__UpperCAmelCase = None ) -> Tuple:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(__UpperCAmelCase )
# standard deviation of the initial noise distribution
A__ = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
A__ = 4
# running values
A__ = []
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> int:
A__ = num_inference_steps
A__ = torch.linspace(1 ,0 ,num_inference_steps + 1 )[:-1]
A__ = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
A__ = torch.tensor(self.config.trained_betas ,dtype=torch.floataa )
else:
A__ = torch.sin(steps * math.pi / 2 ) ** 2
A__ = (1.0 - self.betas**2) ** 0.5
A__ = (torch.atana(self.betas ,self.alphas ) / math.pi * 2)[:-1]
A__ = timesteps.to(__UpperCAmelCase )
A__ = []
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = True ,) -> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
A__ = (self.timesteps == timestep).nonzero().item()
A__ = timestep_index + 1
A__ = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(__UpperCAmelCase )
if len(self.ets ) == 1:
A__ = self.ets[-1]
elif len(self.ets ) == 2:
A__ = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
A__ = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
A__ = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
A__ = self._get_prev_sample(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> torch.FloatTensor:
return sample
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Any:
A__ = self.alphas[timestep_index]
A__ = self.betas[timestep_index]
A__ = self.alphas[prev_timestep_index]
A__ = self.betas[prev_timestep_index]
A__ = (sample - sigma * ets) / max(__UpperCAmelCase ,1e-8 )
A__ = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self ) -> Tuple:
return self.config.num_train_timesteps
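# Illustrative sketch (an assumption for exposition, not part of the scheduler
# above) of the same Adams-Bashforth multistep coefficients used in `step`,
# applied to the scalar ODE y' = -y whose exact solution is exp(-t). The
# lower-order formulas bootstrap the history exactly as in the class above.
import math

def ab4_integrate(f, y0, t0, t1, n):
    h = (t1 - t0) / n
    t, y, ets = t0, y0, []
    for _ in range(n):
        ets.append(f(t, y))
        if len(ets) == 1:
            deriv = ets[-1]  # Euler bootstrap
        elif len(ets) == 2:
            deriv = (3 * ets[-1] - ets[-2]) / 2
        elif len(ets) == 3:
            deriv = (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
        else:
            deriv = (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24
        y += h * deriv
        t += h
    return y

approx = ab4_integrate(lambda t, y: -y, 1.0, 0.0, 1.0, 100)
assert abs(approx - math.exp(-1)) < 1e-3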
| 154 |
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def UpperCAmelCase ( inductance , capacitance ):
"""simple docstring"""
if inductance <= 0:
raise ValueError('Inductance cannot be 0 or negative' )
elif capacitance <= 0:
raise ValueError('Capacitance cannot be 0 or negative' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
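# Worked check (a sketch, separate from the function above): for L = 10 mH
# and C = 100 nF, f = 1 / (2*pi*sqrt(L*C)) ≈ 5.03 kHz.
from math import pi, sqrt

inductance, capacitance = 10e-3, 100e-9
frequency = 1 / (2 * pi * sqrt(inductance * capacitance))
assert abs(frequency - 5032.9) < 1.0
print(f"{frequency:.1f} Hz")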
| 154 | 1 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowerCAmelCase_ ( snake_case_ : str ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = filter(lambda snake_case_ : p.requires_grad , model.parameters() )
UpperCAmelCase_ = sum([np.prod(p.size() ) for p in model_parameters] )
return params
SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.getLogger(__name__)
def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : List[str] ) -> List[str]:
'''simple docstring'''
if metric == "rouge2":
UpperCAmelCase_ = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
UpperCAmelCase_ = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
UpperCAmelCase_ = "{val_avg_em:.4f}-{step_count}"
elif metric == "loss":
UpperCAmelCase_ = "{val_avg_loss:.4f}-{step_count}"
else:
raise NotImplementedError(
f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
" function." )
UpperCAmelCase_ = ModelCheckpoint(
dirpath=snake_case_ , filename=snake_case_ , monitor=f"""val_{metric}""" , mode="max" , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Tuple ) -> int:
'''simple docstring'''
return EarlyStopping(
monitor=f"""val_{metric}""" , mode="min" if "loss" in metric else "max" , patience=snake_case_ , verbose=snake_case_ , )
class __A ( pl.Callback ):
def _lowercase (self : Optional[int] , __a : Tuple , __a : Optional[Any] ):
UpperCAmelCase_ = {f"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(__a )
@rank_zero_only
def _lowercase (self : int , __a : pl.Trainer , __a : pl.LightningModule , __a : str , __a : List[Any]=True ):
logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
UpperCAmelCase_ = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
UpperCAmelCase_ = Path(pl_module.hparams.output_dir )
if type_path == "test":
UpperCAmelCase_ = od / "test_results.txt"
UpperCAmelCase_ = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
UpperCAmelCase_ = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
UpperCAmelCase_ = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=__a )
generations_file.parent.mkdir(exist_ok=__a )
with open(__a , "a+" ) as writer:
for key in sorted(__a ):
if key in ["log", "progress_bar", "preds"]:
continue
UpperCAmelCase_ = metrics[key]
if isinstance(__a , torch.Tensor ):
UpperCAmelCase_ = val.item()
UpperCAmelCase_ = f"""{key}: {val:.6f}\n"""
writer.write(__a )
if not save_generations:
return
if "preds" in metrics:
UpperCAmelCase_ = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(__a )
@rank_zero_only
def _lowercase (self : Optional[Any] , __a : Optional[Any] , __a : Union[str, Any] ):
try:
UpperCAmelCase_ = pl_module.model.model.num_parameters()
except AttributeError:
UpperCAmelCase_ = pl_module.model.num_parameters()
UpperCAmelCase_ = count_trainable_parameters(__a )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6} )
@rank_zero_only
def _lowercase (self : List[str] , __a : pl.Trainer , __a : pl.LightningModule ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(__a , __a , "test" )
@rank_zero_only
def _lowercase (self : List[Any] , __a : pl.Trainer , __a : Dict ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 1 |
'''simple docstring'''
def greatest_common_divisor( x : int , y : int ) -> int:
    '''simple docstring'''
    return x if y == 0 else greatest_common_divisor(y , x % y )
def lcm( x : int , y : int ) -> int:
    '''simple docstring'''
    return (x * y) // greatest_common_divisor(x , y )
def solution( n : int = 20 ) -> int:
    '''simple docstring'''
    g = 1
    for i in range(1 , n + 1 ):
        g = lcm(g , i )
    return g
if __name__ == "__main__":
print(f"{solution() = }")
| 1 | 1 |
import torch
def main( ) ->None:
    """simple docstring"""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f'Successfully ran on {num_gpus} GPUs' )
if __name__ == "__main__":
main()
| 49 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__A : List[Any] = logging.get_logger(__name__)
__A : List[Any] = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def __UpperCamelCase ( _A : Optional[int] ) ->List[str]:
"""simple docstring"""
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
lowerCamelCase_ =k.replace(_A , _A )
if k.startswith("""encoder""" ):
lowerCamelCase_ =k.replace(""".attn""" , """.self_attn""" )
lowerCamelCase_ =k.replace("""norm1""" , """self_attn_layer_norm""" )
lowerCamelCase_ =k.replace("""norm2""" , """final_layer_norm""" )
elif k.startswith("""decoder""" ):
lowerCamelCase_ =k.replace("""norm1""" , """self_attn_layer_norm""" )
lowerCamelCase_ =k.replace("""norm2""" , """encoder_attn_layer_norm""" )
lowerCamelCase_ =k.replace("""norm3""" , """final_layer_norm""" )
return k
def __UpperCamelCase ( _A : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
lowerCamelCase_ =[
"""model.encoder.layernorm_embedding.weight""",
"""model.encoder.layernorm_embedding.bias""",
"""model.decoder.layernorm_embedding.weight""",
"""model.decoder.layernorm_embedding.bias""",
]
for k in keys:
lowerCamelCase_ =sd.pop(_A )
lowerCamelCase_ =k.replace("""layernorm_embedding""" , """layer_norm""" )
assert new_k not in sd
lowerCamelCase_ =v
__A : Any = ['START']
@torch.no_grad()
def __UpperCamelCase ( _A : List[Any] , _A : Union[str, Any] , _A : List[str] ) ->List[str]:
"""simple docstring"""
lowerCamelCase_ =torch.load(_A , map_location="""cpu""" )
lowerCamelCase_ =model["""model"""]
lowerCamelCase_ =BlenderbotConfig.from_json_file(_A )
lowerCamelCase_ =BlenderbotForConditionalGeneration(_A )
lowerCamelCase_ =m.model.state_dict().keys()
lowerCamelCase_ =[]
lowerCamelCase_ ={}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
lowerCamelCase_ =rename_state_dict_key(_A )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
lowerCamelCase_ =v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(_A )
m.model.load_state_dict(_A , strict=_A )
m.half()
m.save_pretrained(_A )
if __name__ == "__main__":
__A : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
__A : str = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
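# Minimal sketch (hypothetical keys, not real checkpoint contents) of the
# substring-pair renaming strategy that rename_state_dict_key applies above.
patterns = [("q_lin", "q_proj"), ("norm_embeddings", "layernorm_embedding")]

def rename(key):
    for old, new in patterns:
        key = key.replace(old, new)
    return key

assert rename("encoder.layers.0.q_lin.weight") == "encoder.layers.0.q_proj.weight"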
| 49 | 1 |
'''simple docstring'''
def A_ ( number ):
    if not isinstance(number , int ):
        msg = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
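# Spot checks for the digit-by-digit comparison above: automorphic numbers are
# those whose square ends in the number itself (5 -> 25, 6 -> 36, 76 -> 5776).
for n, expected in [(5, True), (6, True), (76, True), (7, False)]:
    assert (n * n % 10 ** len(str(n)) == n) == expected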
| 139 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class _snake_case ( _a ):
_A : Optional[int] = '''t5'''
_A : Union[str, Any] = ['''past_key_values''']
_A : Dict = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : List[Any]=32_128 ,SCREAMING_SNAKE_CASE__ : List[str]=512 ,SCREAMING_SNAKE_CASE__ : Any=64 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=2_048 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=6 ,SCREAMING_SNAKE_CASE__ : List[Any]=None ,SCREAMING_SNAKE_CASE__ : Dict=8 ,SCREAMING_SNAKE_CASE__ : Optional[int]=32 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=128 ,SCREAMING_SNAKE_CASE__ : List[Any]=0.1 ,SCREAMING_SNAKE_CASE__ : Tuple=1e-6 ,SCREAMING_SNAKE_CASE__ : str=1.0 ,SCREAMING_SNAKE_CASE__ : int="relu" ,SCREAMING_SNAKE_CASE__ : Dict=True ,SCREAMING_SNAKE_CASE__ : int=True ,SCREAMING_SNAKE_CASE__ : Dict=0 ,SCREAMING_SNAKE_CASE__ : Tuple=1 ,**SCREAMING_SNAKE_CASE__ : Tuple ,):
SCREAMING_SNAKE_CASE:int = vocab_size
SCREAMING_SNAKE_CASE:Any = d_model
SCREAMING_SNAKE_CASE:Union[str, Any] = d_kv
SCREAMING_SNAKE_CASE:Optional[int] = d_ff
SCREAMING_SNAKE_CASE:Tuple = num_layers
SCREAMING_SNAKE_CASE:str = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
SCREAMING_SNAKE_CASE:Union[str, Any] = num_heads
SCREAMING_SNAKE_CASE:int = relative_attention_num_buckets
SCREAMING_SNAKE_CASE:Tuple = relative_attention_max_distance
SCREAMING_SNAKE_CASE:Dict = dropout_rate
SCREAMING_SNAKE_CASE:List[Any] = layer_norm_epsilon
SCREAMING_SNAKE_CASE:List[str] = initializer_factor
SCREAMING_SNAKE_CASE:Tuple = feed_forward_proj
SCREAMING_SNAKE_CASE:str = use_cache
SCREAMING_SNAKE_CASE:Optional[Any] = self.feed_forward_proj.split("-" )
SCREAMING_SNAKE_CASE:Any = act_info[-1]
SCREAMING_SNAKE_CASE:Tuple = act_info[0] == "gated"
if len(SCREAMING_SNAKE_CASE__ ) > 1 and act_info[0] != "gated" or len(SCREAMING_SNAKE_CASE__ ) > 2:
raise ValueError(
F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
SCREAMING_SNAKE_CASE:int = "gelu_new"
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ ,eos_token_id=SCREAMING_SNAKE_CASE__ ,is_encoder_decoder=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
class _snake_case ( _a ):
@property
def __UpperCamelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE:int = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
SCREAMING_SNAKE_CASE:Optional[int] = "past_encoder_sequence + sequence"
SCREAMING_SNAKE_CASE:str = {0: "batch"}
SCREAMING_SNAKE_CASE:List[Any] = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE:Tuple = {0: "batch", 1: "decoder_sequence"}
SCREAMING_SNAKE_CASE:List[Any] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE__ ,direction="inputs" )
return common_inputs
@property
def __UpperCamelCase ( self : Optional[int] ):
return 13
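# Sketch of the feed_forward_proj parsing done in __init__ above: a string
# like "gated-gelu" splits into a gating flag plus the base activation name,
# while a bare name such as "relu" is left ungated.
for proj, expected in [("relu", ("relu", False)), ("gated-gelu", ("gelu", True))]:
    parts = proj.split("-")
    act, is_gated = parts[-1], parts[0] == "gated"
    assert (act, is_gated) == expected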
| 139 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = """ctrl"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""past_key_values"""]
SCREAMING_SNAKE_CASE_ : Dict = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , lowerCAmelCase__=246_534 , lowerCAmelCase__=256 , lowerCAmelCase__=1_280 , lowerCAmelCase__=8_192 , lowerCAmelCase__=48 , lowerCAmelCase__=16 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=1e-6 , lowerCAmelCase__=0.02 , lowerCAmelCase__=True , **lowerCAmelCase__ , ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = n_positions
SCREAMING_SNAKE_CASE = n_embd
SCREAMING_SNAKE_CASE = n_layer
SCREAMING_SNAKE_CASE = n_head
SCREAMING_SNAKE_CASE = dff
SCREAMING_SNAKE_CASE = resid_pdrop
SCREAMING_SNAKE_CASE = embd_pdrop
SCREAMING_SNAKE_CASE = layer_norm_epsilon
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = use_cache
super().__init__(**lowerCAmelCase__ )
| 38 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class lowerCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = XGLMConfig
SCREAMING_SNAKE_CASE_ : List[str] = {}
SCREAMING_SNAKE_CASE_ : Optional[Any] = """gelu"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=14 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=99 , lowerCAmelCase__=32 , lowerCAmelCase__=2 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=512 , lowerCAmelCase__=0.02 , ) -> str:
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = d_model
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = ffn_dim
SCREAMING_SNAKE_CASE = activation_function
SCREAMING_SNAKE_CASE = activation_dropout
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = 1
def __A ( self ) -> Optional[int]:
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def __A ( self ) -> List[str]:
SCREAMING_SNAKE_CASE = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = self.get_config()
SCREAMING_SNAKE_CASE = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def __A ( self ) -> int:
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=lowerCAmelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=lowerCAmelCase__ , )
def __A ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
SCREAMING_SNAKE_CASE_ : List[str] = (TFXGLMForCausalLM,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE_ : int = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = False
def __A ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = TFXGLMModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=lowerCAmelCase__ , n_embd=37 )
def __A ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@slow
def __A ( self ) -> Tuple:
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = TFXGLMModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def __A ( self ) -> Tuple:
super().test_resize_token_embeddings()
@require_tf
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
    def __A ( self , verify_outputs=True ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
SCREAMING_SNAKE_CASE = tf.convert_to_tensor([[2, 268, 9_865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
SCREAMING_SNAKE_CASE = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
# fmt: on
SCREAMING_SNAKE_CASE = model.generate(lowerCAmelCase__ , do_sample=lowerCAmelCase__ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , lowerCAmelCase__ )
@slow
def __A ( self ) -> List[str]:
SCREAMING_SNAKE_CASE = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
SCREAMING_SNAKE_CASE = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
SCREAMING_SNAKE_CASE = tokenizer('Today is a nice day and' , return_tensors='tf' )
SCREAMING_SNAKE_CASE = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(':/CPU:0' ):
SCREAMING_SNAKE_CASE = model.generate(lowerCAmelCase__ , do_sample=lowerCAmelCase__ , seed=[7, 0] )
SCREAMING_SNAKE_CASE = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def __A ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
SCREAMING_SNAKE_CASE = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
SCREAMING_SNAKE_CASE = 'left'
# use different length sentences to test batching
SCREAMING_SNAKE_CASE = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase__ , return_tensors='tf' , padding=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = inputs['input_ids']
SCREAMING_SNAKE_CASE = model.generate(input_ids=lowerCAmelCase__ , attention_mask=inputs['attention_mask'] , max_new_tokens=12 )
SCREAMING_SNAKE_CASE = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
SCREAMING_SNAKE_CASE = model.generate(input_ids=lowerCAmelCase__ , max_new_tokens=12 )
SCREAMING_SNAKE_CASE = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
SCREAMING_SNAKE_CASE = model.generate(input_ids=lowerCAmelCase__ , max_new_tokens=12 )
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , [non_padded_sentence, padded_sentence] )
| 38 | 1 |
def topological_sort( graph ) -> None:
    indegree = [0] * len(graph )
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        cnt += 1
        topo.append(vertex )
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x )
    if cnt != len(graph ):
        print('''Cycle exists''' )
    else:
        print(topo )
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
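# Self-contained sketch (same adjacency-list representation as above) of the
# indegree bookkeeping on a cyclic graph: no vertex ever reaches indegree 0,
# so Kahn's algorithm can never dequeue anything and reports a cycle.
cyclic = {0: [1], 1: [2], 2: [0]}
indeg = {v: 0 for v in cyclic}
for targets in cyclic.values():
    for t in targets:
        indeg[t] += 1
assert all(d > 0 for d in indeg.values())  # every vertex waits on another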
| 121 |
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def lowerCAmelCase__ ( _UpperCamelCase : List[Any] , _UpperCamelCase : Any ) -> Dict:
"""simple docstring"""
snake_case = XCLIPTextConfig()
# derive patch size from model name
snake_case = model_name.find('patch' )
snake_case = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
snake_case = XCLIPVisionConfig(patch_size=_UpperCamelCase , num_frames=_UpperCamelCase )
if "large" in model_name:
snake_case = 7_6_8
snake_case = 3_0_7_2
snake_case = 1_2
snake_case = 1_0_2_4
snake_case = 4_0_9_6
snake_case = 1_6
snake_case = 2_4
snake_case = 7_6_8
snake_case = 3_0_7_2
if model_name == "xclip-large-patch14-16-frames":
snake_case = 3_3_6
snake_case = XCLIPConfig.from_text_vision_configs(_UpperCamelCase , _UpperCamelCase )
if "large" in model_name:
snake_case = 7_6_8
return config
def lowerCAmelCase__ ( _UpperCamelCase : Dict ) -> List[Any]:
"""simple docstring"""
if name == "token_embedding.weight":
snake_case = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
if name == "positional_embedding":
snake_case = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "ln_1" in name:
snake_case = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
snake_case = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
snake_case = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
snake_case = name.replace('c_proj' , 'fc2' )
if name.startswith('transformer.resblocks' ):
snake_case = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
if "attn.out_proj" in name and "message" not in name:
snake_case = name.replace('attn.out_proj' , 'self_attn.out_proj' )
if "ln_final" in name:
snake_case = name.replace('ln_final' , 'text_model.final_layer_norm' )
# visual encoder
if name == "visual.class_embedding":
snake_case = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
if name == "visual.positional_embedding":
snake_case = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
if name.startswith('visual.transformer.resblocks' ):
snake_case = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
if "visual.conv1" in name:
snake_case = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
if "visual.ln_pre" in name:
snake_case = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
if "visual.ln_post" in name:
snake_case = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
if "visual.proj" in name:
snake_case = name.replace('visual.proj' , 'visual_projection.weight' )
if "text_projection" in name:
snake_case = name.replace('text_projection' , 'text_projection.weight' )
# things on top
if "prompts_visual_proj" in name:
snake_case = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
if "prompts_visual_ln" in name:
snake_case = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
# mit
if name == "mit.positional_embedding":
snake_case = name.replace('positional' , 'position' )
if name.startswith('mit.resblocks' ):
snake_case = name.replace('mit.resblocks' , 'mit.encoder.layers' )
# prompts generator
if name.startswith('prompts_generator.norm' ):
snake_case = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
return name
def lowerCAmelCase__ ( _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] ) -> List[str]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
snake_case = orig_state_dict.pop(_UpperCamelCase )
if "attn.in_proj" in key:
snake_case = key.split('.' )
if key.startswith('visual' ):
snake_case = key_split[3]
snake_case = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
snake_case = val[
:dim, :
]
snake_case = val[
dim : dim * 2, :
]
snake_case = val[
-dim:, :
]
else:
snake_case = val[
:dim
]
snake_case = val[
dim : dim * 2
]
snake_case = val[
-dim:
]
else:
if "weight" in key:
snake_case = val[
:dim, :
]
snake_case = val[
dim : dim * 2, :
]
snake_case = val[
-dim:, :
]
else:
snake_case = val[:dim]
snake_case = val[
dim : dim * 2
]
snake_case = val[-dim:]
elif key.startswith('mit' ):
snake_case = key_split[2]
snake_case = config.vision_config.mit_hidden_size
if "weight" in key:
snake_case = val[:dim, :]
snake_case = val[dim : dim * 2, :]
snake_case = val[-dim:, :]
else:
snake_case = val[:dim]
snake_case = val[dim : dim * 2]
snake_case = val[-dim:]
else:
snake_case = key_split[2]
snake_case = config.text_config.hidden_size
if "weight" in key:
snake_case = val[:dim, :]
snake_case = val[
dim : dim * 2, :
]
snake_case = val[-dim:, :]
else:
snake_case = val[:dim]
snake_case = val[
dim : dim * 2
]
snake_case = val[-dim:]
else:
snake_case = rename_key(_UpperCamelCase )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
snake_case = val.T
snake_case = val
return orig_state_dict
def lowerCAmelCase__ ( _UpperCamelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
if num_frames == 8:
snake_case = 'eating_spaghetti_8_frames.npy'
elif num_frames == 1_6:
snake_case = 'eating_spaghetti.npy'
elif num_frames == 3_2:
snake_case = 'eating_spaghetti_32_frames.npy'
snake_case = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename=_UpperCamelCase , repo_type='dataset' , )
snake_case = np.load(_UpperCamelCase )
return list(_UpperCamelCase )
def lowerCAmelCase__ ( _UpperCamelCase : str , _UpperCamelCase : Tuple=None , _UpperCamelCase : Union[str, Any]=False ) -> Optional[int]:
"""simple docstring"""
snake_case = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
snake_case = model_to_url[model_name]
snake_case = 8
if "16-frames" in model_name:
snake_case = 1_6
elif "shot" in model_name:
snake_case = 3_2
snake_case = get_xclip_config(_UpperCamelCase , _UpperCamelCase )
snake_case = XCLIPModel(_UpperCamelCase )
model.eval()
if "drive" in checkpoint_url:
snake_case = 'pytorch_model.bin'
gdown.cached_download(_UpperCamelCase , _UpperCamelCase , quiet=_UpperCamelCase )
snake_case = torch.load(_UpperCamelCase , map_location='cpu' )['model']
else:
snake_case = torch.hub.load_state_dict_from_url(_UpperCamelCase )['model']
snake_case = convert_state_dict(_UpperCamelCase , _UpperCamelCase )
snake_case = XCLIPModel(_UpperCamelCase )
snake_case ,snake_case = model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
snake_case = 3_3_6 if model_name == 'xclip-large-patch14-16-frames' else 2_2_4
snake_case = VideoMAEImageProcessor(size=_UpperCamelCase )
snake_case = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
snake_case = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
snake_case = XCLIPProcessor(image_processor=_UpperCamelCase , tokenizer=_UpperCamelCase )
snake_case = prepare_video(_UpperCamelCase )
snake_case = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=_UpperCamelCase , return_tensors='pt' , padding=_UpperCamelCase )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
snake_case = model(**_UpperCamelCase )
# Verify outputs
snake_case = outputs.logits_per_video
snake_case = logits_per_video.softmax(dim=1 )
print('Probs:' , _UpperCamelCase )
# kinetics-400
if model_name == "xclip-base-patch32":
snake_case = torch.tensor([[0.00_19, 0.99_51, 0.00_30]] )
elif model_name == "xclip-base-patch32-16-frames":
snake_case = torch.tensor([[7.0_9_9_9e-0_4, 9.9_8_8_3e-0_1, 4.5_5_8_0e-0_4]] )
elif model_name == "xclip-base-patch16":
snake_case = torch.tensor([[0.00_83, 0.96_81, 0.02_36]] )
elif model_name == "xclip-base-patch16-16-frames":
snake_case = torch.tensor([[7.6_9_3_7e-0_4, 9.9_7_2_8e-0_1, 1.9_4_7_3e-0_3]] )
elif model_name == "xclip-large-patch14":
snake_case = torch.tensor([[0.00_62, 0.98_64, 0.00_75]] )
elif model_name == "xclip-large-patch14-16-frames":
snake_case = torch.tensor([[3.3_8_7_7e-0_4, 9.9_9_3_7e-0_1, 2.8_8_8_8e-0_4]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
snake_case = torch.tensor([[0.05_55, 0.89_14, 0.05_31]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
snake_case = torch.tensor([[3.8_5_5_4e-0_4, 9.9_9_2_9e-0_1, 3.2_7_5_4e-0_4]] )
elif model_name == "xclip-large-patch14-kinetics-600":
snake_case = torch.tensor([[0.00_36, 0.99_20, 0.00_45]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
snake_case = torch.tensor([[7.1_8_9_0e-0_6, 9.9_9_9_4e-0_1, 5.6_5_5_9e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
snake_case = torch.tensor([[1.0_3_2_0e-0_5, 9.9_9_9_3e-0_1, 6.2_4_3_5e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
snake_case = torch.tensor([[4.1_3_7_7e-0_6, 9.9_9_9_0e-0_1, 9.8_3_8_6e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
snake_case = torch.tensor([[4.1_3_4_7e-0_5, 9.9_9_6_2e-0_1, 3.3_4_1_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
snake_case = torch.tensor([[8.5_8_5_7e-0_5, 9.9_9_2_8e-0_1, 6.3_2_9_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
snake_case = torch.tensor([[8.5_8_5_7e-0_5, 9.9_9_2_8e-0_1, 6.3_2_9_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
snake_case = torch.tensor([[0.00_27, 0.99_04, 0.00_70]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
snake_case = torch.tensor([[9.8_2_1_9e-0_4, 9.9_5_9_3e-0_1, 3.0_8_6_3e-0_3]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
snake_case = torch.tensor([[3.5_0_8_2e-0_4, 9.9_7_8_5e-0_1, 1.7_9_6_6e-0_3]] )
else:
raise ValueError(f"""Model name {model_name} not supported""" )
assert torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_UpperCamelCase )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(_UpperCamelCase , organization='nielsr' )
processor.push_to_hub(_UpperCamelCase , organization='nielsr' )
slow_tokenizer.push_to_hub(_UpperCamelCase , organization='nielsr' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
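# Minimal sketch (toy shapes, no real checkpoint) of the fused-QKV split that
# convert_state_dict performs above: a (3*dim, dim) in_proj weight is sliced
# into equal query/key/value blocks, and concatenating them restores it.
import torch

dim = 4
in_proj_weight = torch.randn(3 * dim, dim)
q, k, v = in_proj_weight[:dim, :], in_proj_weight[dim : 2 * dim, :], in_proj_weight[-dim:, :]
assert torch.equal(torch.cat([q, k, v]), in_proj_weight)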
| 150 | 0 |
import argparse
import os
import re
import packaging.version
UpperCAmelCase_ = """examples/"""
UpperCAmelCase_ = {
"""examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""),
"""doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
UpperCAmelCase_ = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
UpperCAmelCase_ = """README.md"""
def lowerCamelCase__ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
with open(UpperCamelCase__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
_snake_case = f.read()
_snake_case , _snake_case = REPLACE_PATTERNS[pattern]
_snake_case = replace.replace('VERSION' , UpperCamelCase__ )
_snake_case = re_pattern.sub(UpperCamelCase__ , UpperCamelCase__ )
with open(UpperCamelCase__ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(UpperCamelCase__ )
def lowerCamelCase__ ( UpperCamelCase__ : Any ) -> Optional[Any]:
'''simple docstring'''
for folder, directories, fnames in os.walk(UpperCamelCase__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ , pattern='examples' )
def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : Dict=False ) -> List[str]:
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not patch:
update_version_in_examples(UpperCamelCase__ )
def lowerCamelCase__ ( ) -> Tuple:
'''simple docstring'''
_snake_case = '🤗 Transformers currently provides the following architectures'
_snake_case = '1. Want to contribute a new model?'
with open(UpperCamelCase__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
_snake_case = f.readlines()
# Find the start of the list.
_snake_case = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_snake_case = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
_snake_case = lines[index].replace(
'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , )
index += 1
with open(UpperCamelCase__ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(UpperCamelCase__ )
def lowerCamelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
with open(REPLACE_FILES['init'] , 'r' ) as f:
_snake_case = f.read()
_snake_case = REPLACE_PATTERNS['init'][0].search(UpperCamelCase__ ).groups()[0]
return packaging.version.parse(UpperCamelCase__ )
def lowerCamelCase__ ( UpperCamelCase__ : List[str]=False ) -> int:
'''simple docstring'''
_snake_case = get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
if default_version.is_devrelease:
_snake_case = default_version.base_version
elif patch:
_snake_case = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
_snake_case = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
_snake_case = input(F'''Which version are you releasing? [{default_version}]''' )
if len(UpperCamelCase__ ) == 0:
_snake_case = default_version
print(F'''Updating version to {version}.''' )
global_version_update(UpperCamelCase__ , patch=UpperCamelCase__ )
if not patch:
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
def lowerCamelCase__ ( ) -> Any:
'''simple docstring'''
_snake_case = get_version()
_snake_case = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
_snake_case = current_version.base_version
# Check with the user we got that right.
_snake_case = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(UpperCamelCase__ ) == 0:
_snake_case = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(UpperCamelCase__ )
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
UpperCAmelCase_ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
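# Sketch of the regex-replace mechanic used by update_version_in_file above,
# applied to an in-memory string instead of a file on disk.
import re

pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
code = '__version__ = "4.26.0.dev0"\n'
print(pattern.sub('__version__ = "4.26.0"', code))  # -> __version__ = "4.26.0"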
| 295 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
UpperCAmelCase_ = logging.get_logger(__name__)
logging.set_verbosity_info()
def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : str ) -> Union[str, Any]:
'''simple docstring'''
if "xprophetnet" in prophetnet_checkpoint_path:
_snake_case = XLMProphetNetForConditionalGenerationOld.from_pretrained(UpperCamelCase__ )
_snake_case , _snake_case = XLMProphetNetForConditionalGeneration.from_pretrained(
UpperCamelCase__ , output_loading_info=UpperCamelCase__ )
else:
_snake_case = ProphetNetForConditionalGenerationOld.from_pretrained(UpperCamelCase__ )
_snake_case , _snake_case = ProphetNetForConditionalGeneration.from_pretrained(
UpperCamelCase__ , output_loading_info=UpperCamelCase__ )
_snake_case = ['key_proj', 'value_proj', 'query_proj']
_snake_case = {
'self_attn': 'ngram_self_attn',
'cross_attn': 'encoder_attn',
'cross_attn_layer_norm': 'encoder_attn_layer_norm',
'feed_forward_layer_norm': 'final_layer_norm',
'feed_forward': '',
'intermediate': 'fc1',
'output': 'fc2',
'key_proj': 'k_proj',
'query_proj': 'q_proj',
'value_proj': 'v_proj',
'word_embeddings': 'embed_tokens',
'embeddings_layer_norm': 'emb_layer_norm',
'relative_pos_embeddings': 'relative_linear',
'ngram_embeddings': 'ngram_input_embed',
'position_embeddings': 'embed_positions',
}
for key in loading_info["missing_keys"]:
_snake_case = key.split('.' )
if attributes[0] == "lm_head":
_snake_case = prophet
_snake_case = prophet_old
else:
_snake_case = prophet.prophetnet
_snake_case = prophet_old.model
_snake_case = False
for attribute in attributes:
if attribute in mapping:
_snake_case = mapping[attribute]
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) > 0:
_snake_case = attribute
elif hasattr(UpperCamelCase__ , UpperCamelCase__ ):
_snake_case = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_snake_case = old_model.weight
logger.info(F'''{attribute} is initialized.''' )
_snake_case = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_snake_case = old_model.bias
logger.info(F'''{attribute} is initialized''' )
_snake_case = True
break
elif attribute in special_keys and hasattr(UpperCamelCase__ , 'in_proj_weight' ):
_snake_case = old_model.in_proj_weight.shape[0] // 3
_snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_snake_case = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_snake_case = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_snake_case = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_snake_case = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_snake_case = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_snake_case = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_snake_case = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
_snake_case = nn.Parameter(old_model.embed_positions.weight[:512, :] )
_snake_case = True
break
if attribute.isdigit():
_snake_case = model[int(UpperCamelCase__ )]
_snake_case = old_model[int(UpperCamelCase__ )]
else:
_snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ )
if old_attribute == "":
_snake_case = old_model
else:
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError(F'''{old_model} does not have {old_attribute}''' )
_snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ )
if not is_key_init:
raise ValueError(F'''{key} was not correctly initialized!''' )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase_ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 295 | 1 |
from ..utils import DummyObject, requires_backends
class A_ ( metaclass=SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Union[str, Any] = ['''torch''', '''torchsde''']
def __init__( self : Union[str, Any] ,*SCREAMING_SNAKE_CASE__ : int ,**SCREAMING_SNAKE_CASE__ : Optional[int]):
requires_backends(self ,['torch', 'torchsde'])
@classmethod
def lowerCAmelCase ( cls : Dict ,*SCREAMING_SNAKE_CASE__ : Optional[Any] ,**SCREAMING_SNAKE_CASE__ : Optional[int]):
requires_backends(cls ,['torch', 'torchsde'])
@classmethod
def lowerCAmelCase ( cls : Any ,*SCREAMING_SNAKE_CASE__ : str ,**SCREAMING_SNAKE_CASE__ : List[Any]):
requires_backends(cls ,['torch', 'torchsde'])
| 73 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Optional[Any] =logging.get_logger(__name__)
_UpperCAmelCase : str ={
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = """vit_mae"""
def __init__( self , __lowercase=7_6_8 , __lowercase=1_2 , __lowercase=1_2 , __lowercase=3_0_7_2 , __lowercase="gelu" , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.02 , __lowercase=1e-12 , __lowercase=2_2_4 , __lowercase=1_6 , __lowercase=3 , __lowercase=True , __lowercase=1_6 , __lowercase=5_1_2 , __lowercase=8 , __lowercase=2_0_4_8 , __lowercase=0.75 , __lowercase=False , **__lowercase , ) -> str:
super().__init__(**__lowercase )
lowerCAmelCase_ : Dict = hidden_size
lowerCAmelCase_ : Any = num_hidden_layers
lowerCAmelCase_ : Any = num_attention_heads
lowerCAmelCase_ : int = intermediate_size
lowerCAmelCase_ : Dict = hidden_act
lowerCAmelCase_ : int = hidden_dropout_prob
lowerCAmelCase_ : str = attention_probs_dropout_prob
lowerCAmelCase_ : List[str] = initializer_range
lowerCAmelCase_ : Dict = layer_norm_eps
lowerCAmelCase_ : Union[str, Any] = image_size
lowerCAmelCase_ : Optional[int] = patch_size
lowerCAmelCase_ : Tuple = num_channels
lowerCAmelCase_ : List[str] = qkv_bias
lowerCAmelCase_ : List[Any] = decoder_num_attention_heads
lowerCAmelCase_ : int = decoder_hidden_size
lowerCAmelCase_ : Optional[int] = decoder_num_hidden_layers
lowerCAmelCase_ : Tuple = decoder_intermediate_size
lowerCAmelCase_ : Tuple = mask_ratio
lowerCAmelCase_ : Any = norm_pix_loss
| 262 | 0 |
"""simple docstring"""
def bubble_sort( list_data : list , length : int = 0 ):
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
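# Quick usage check for the recursive bubble sort above (name per the fixed
# definition): the early exit returns as soon as a pass makes no swap.
assert bubble_sort([5, 1, 4, 2, 8]) == [1, 2, 4, 5, 8]
assert bubble_sort([]) == []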
| 359 |
"""simple docstring"""
def manhattan_distance( point_a : list , point_b : list ):
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError("""Both points must be in the same n-dimensional space""" )
    return float(sum(abs(a - b ) for a, b in zip(point_a , point_b ) ) )
def _validate_point( point : list[float] ):
    if point:
        if isinstance(point , list ):
            for item in point:
                if not isinstance(item , (int, float) ):
                    msg = (
                        """Expected a list of numbers as input, found """
                        F"{type(item ).__name__}"
                    )
                    raise TypeError(msg )
        else:
            msg = F"Expected a list of numbers as input, found {type(point ).__name__}"
            raise TypeError(msg )
    else:
        raise ValueError("""Missing an input""" )
def manhattan_distance_one_liner( point_a : list , point_b : list ):
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError("""Both points must be in the same n-dimensional space""" )
    return float(sum(abs(x - y ) for x, y in zip(point_a , point_b ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
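# Worked check for both implementations above (names per the fixed
# definitions): |1 - 2| + |1 - 2| = 2.0, and the one-liner agrees with the
# validated version.
assert manhattan_distance([1, 1], [2, 2]) == 2.0
assert manhattan_distance_one_liner([1, 1], [2, 2]) == 2.0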
| 76 | 0 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
SCREAMING_SNAKE_CASE_: str =logging.getLogger(__name__)
@dataclass
class __A :
a__ : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
a__ : Optional[str] = field(
default=UpperCamelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a__ : Optional[str] = field(
default=UpperCamelCase__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
a__ : Optional[str] = field(
default=UpperCamelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
a__ : bool = field(default=UpperCamelCase__ , metadata={"""help""": """Whether tp freeze the encoder."""} )
a__ : bool = field(default=UpperCamelCase__ , metadata={"""help""": """Whether to freeze the embeddings."""} )
@dataclass
class __A :
a__ : str = field(
metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
a__ : Optional[str] = field(
default="""summarization""" , metadata={"""help""": """Task name, summarization (or summarization_{dataset} for pegasus) or translation"""} , )
a__ : Optional[int] = field(
default=1_024 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a__ : Optional[int] = field(
default=128 , metadata={
"""help""": (
"""The maximum total sequence length for target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a__ : Optional[int] = field(
default=142 , metadata={
"""help""": (
"""The maximum total sequence length for validation target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded. """
"""This argument is also used to override the ``max_length`` param of ``model.generate``, which is used """
"""during ``evaluate`` and ``predict``."""
)
} , )
a__ : Optional[int] = field(
default=142 , metadata={
"""help""": (
"""The maximum total sequence length for test target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# training examples. -1 means use all."""} )
a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# validation examples. -1 means use all."""} )
a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# test examples. -1 means use all."""} )
a__ : Optional[str] = field(default=UpperCamelCase__ , metadata={"""help""": """Source language id for translation."""} )
a__ : Optional[str] = field(default=UpperCamelCase__ , metadata={"""help""": """Target language id for translation."""} )
a__ : Optional[int] = field(default=UpperCamelCase__ , metadata={"""help""": """# num_beams to use for evaluation."""} )
a__ : bool = field(
default=UpperCamelCase__ , metadata={"""help""": """If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."""} , )
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Union[str, Any] , snake_case_ : List[Any] ) -> str:
'''simple docstring'''
logger.info(f"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(f""" {key} = {metrics[key]}""" )
save_json(snake_case_ , os.path.join(snake_case_ , f"""{split}_results.json""" ) )
def main():
    # See all possible arguments by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
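# A hypothetical invocation sketch (the checkpoint and data paths below are
# illustrative assumptions, not part of the original script):
#
#   python finetune_trainer.py \
#       --model_name_or_path t5-small \
#       --data_dir ./xsum \
#       --output_dir ./xsum_output \
#       --do_train --do_eval --predict_with_generate \
#       --max_source_length 1024 --max_target_length 56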
| 1 |
"""Utilities for the HANS (Heuristic Analysis for NLI Systems) evaluation dataset."""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None


if is_torch_available():
    import torch
    from torch.utils.data import Dataset

    class HansDataset(Dataset):
        features: List[InputFeatures]
        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))
                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Loads a data file into a list of ``InputFeatures``."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features


hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
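# Minimal usage sketch (illustrative, not part of the original module). It
# assumes a local `hans_data/` directory containing the HANS tsv files; the
# tokenizer checkpoint is an arbitrary choice for demonstration.
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   processor = HansProcessor()
#   examples = processor.get_dev_examples("hans_data")
#   features = hans_convert_examples_to_features(examples, processor.get_labels(), 128, tokenizer)
#   print(f"converted {len(features)} examples")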
| 1 | 1 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
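# Why gather_for_metrics matters here (illustrative numbers, not from the
# original test): with 2 processes, batch size 16 and 82 samples, the sharded
# dataloaders pad the data up to 96 samples (the next multiple of 32), so a
# plain `gather` would return 96 predictions; `gather_for_metrics` drops the
# 14 duplicated pad samples so exactly 82 remain.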
def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, _ = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
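# Typical ways to run this script (illustrative; the file name is an
# assumption, since in the accelerate repo this module is launched through
# the test utilities):
#
#   accelerate launch --num_processes 2 test_metrics.py   # multi-process run
#   python test_metrics.py                                # single-process run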
| 102 |
def merge_sort(collection: list) -> list:
    """Sorts a list by repeatedly extracting the minimum and maximum of the remainder.

    >>> merge_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> merge_sort([])
    []
    >>> merge_sort([-2, -5, -45])
    [-45, -5, -2]
    """
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
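# Complexity note with a rough micro-benchmark sketch (illustrative, not part
# of the original file): min(), max() and remove() are each O(n), so this
# min-max extraction runs in O(n^2) overall.
#
#   import random, timeit
#   data = random.sample(range(10_000), 1_000)
#   print(timeit.timeit(lambda: merge_sort(data.copy()), number=10))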
| 102 | 1 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    """Constructs a Whisper feature extractor that turns raw audio into log-mel spectrogram features."""

    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,  # pad inputs to max length with silence token (zero) and no attention mask
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
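# Minimal usage sketch (illustrative, not part of the original module): one
# second of a 440 Hz tone at 16 kHz is padded to 30 s and converted to an
# 80-bin log-mel spectrogram of 3000 frames.
#
#   import numpy as np
#
#   extractor = WhisperFeatureExtractor()
#   waveform = np.sin(2 * np.pi * 440 * np.arange(16_000) / 16_000).astype(np.float32)
#   features = extractor(waveform, sampling_rate=16_000, return_tensors="np")
#   print(features["input_features"].shape)  # (1, 80, 3000)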
| 260 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
_UpperCAmelCase = True
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
_UpperCAmelCase = True
if a[i].islower():
_UpperCAmelCase = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
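# Worked example (illustrative): abbr("daBcd", "ABC") is True because the 'a'
# and 'c' can be capitalized to give "dABCd", after which the remaining
# lowercase 'd's are deleted, leaving exactly "ABC".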
| 260 | 1 |
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
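# Illustrative example of the marker handling above (not part of the original
# conftest): a test opts out of the automatic "unit" marker by declaring
# another recognized marker explicitly.
#
#   @pytest.mark.integration
#   def test_load_remote_dataset():
#       ...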
| 200 |
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Generates all possible combinations of k numbers out of 1 ... n."""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
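# For the n=4, k=2 defaults above, the program prints (illustrative trace):
#
#   1 2
#   1 3
#   1 4
#   2 3
#   2 4
#   3 4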
| 200 | 1 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    r"""
    Unconditional image generation pipeline using a VQ-VAE, a U-Net and a DDIM scheduler.
    """

    def __init__(self, vqvae: VQModel, unet: UNetaDModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator=None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
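# Minimal usage sketch (illustrative; the checkpoint name is the public
# CompVis LDM CelebA-HQ model, used here only as an example):
#
#   pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipe(num_inference_steps=50).images[0]
#   image.save("sample.png")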
| 248 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")

        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")

        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
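# Note (illustrative): these integration tests require a Flax/JAX install with
# accelerator devices and download several GB of weights, so they are normally
# gated behind the slow-test flag when the suite is run.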
| 248 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
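# These integration tests download multi-GB checkpoints, so they are gated
# behind the slow-test flag; a typical invocation (illustrative) is:
#
#   RUN_SLOW=1 python -m pytest tests/test_modeling_xlm_roberta.py -k base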
| 281 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 281 | 1 |
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''encoder.deit.blocks.{i}.norm1.weight''', f'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.norm1.bias''', f'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.attn.proj.weight''', f'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.attn.proj.bias''', f'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.norm2.weight''', f'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.norm2.bias''', f'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc1.weight''', f'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc1.bias''', f'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc2.weight''', f'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.mlp.fc2.bias''', f'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg"
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights to our VisionEncoderDecoderModel structure.
    """
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
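# Example invocation (illustrative; the script file name is an assumption):
#
#   python convert_trocr_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten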
| 336 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__a = logging.get_logger(__name__)
__a = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
'constant': get_constant_schedule,
'constant_w_warmup': get_constant_schedule_with_warmup,
}
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> List[str]:
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if config is None:
assert isinstance(self.model , SCREAMING_SNAKE_CASE_ ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f''' {self.model.__class__}'''
)
lowercase_ = self.model.config
else:
lowercase_ = config
lowercase_ = data_args
lowercase_ = self.config.tgt_vocab_size if isinstance(self.config , SCREAMING_SNAKE_CASE_ ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
''' padding..''' )
if self.args.label_smoothing == 0:
lowercase_ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
lowercase_ = label_smoothed_nll_loss
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]:
if self.optimizer is None:
lowercase_ = ['''bias''', '''LayerNorm.weight''']
lowercase_ = [
{
'''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'''weight_decay''': self.args.weight_decay,
},
{
'''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
lowercase_ = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
lowercase_ = Adafactor
lowercase_ = {'''scale_parameter''': False, '''relative_step''': False}
else:
lowercase_ = AdamW
lowercase_ = {
'''betas''': (self.args.adam_betaa, self.args.adam_betaa),
'''eps''': self.args.adam_epsilon,
}
lowercase_ = self.args.learning_rate
if self.sharded_ddp:
lowercase_ = OSS(
params=SCREAMING_SNAKE_CASE_ , optim=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
else:
lowercase_ = optimizer_cls(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if self.lr_scheduler is None:
lowercase_ = self._get_lr_scheduler(SCREAMING_SNAKE_CASE_ )
else: # ignoring --lr_scheduler
logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' )
    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )
            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]] = None) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"], attention_mask=inputs["attention_mask"], **gen_kwargs
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
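# Illustrative sketch (added for clarity, not part of the original trainer): the
# right-padding performed by `_pad_tensors_to_max_len`, shown standalone with
# made-up shapes and a pad id of 0.
#   t = torch.tensor([[5, 6, 7]])                               # shape (1, 3)
#   padded = 0 * torch.ones((1, 5), dtype=t.dtype)
#   padded[:, : t.shape[-1]] = t
#   # -> tensor([[5, 6, 7, 0, 0]])                              # shape (1, max_length=5)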
| 30 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/timesformer""": """https://huggingface.co/facebook/timesformer/resolve/main/config.json""",
}
class TimesformerConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "timesformer"
    def __init__(self, image_size=224, patch_size=16, num_channels=3, num_frames=8, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-6, qkv_bias=True, attention_type="divided_space_time", drop_path_rate=0, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
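# Hedged usage sketch (added, not part of the original module): instantiates the
# config defined above with two overrides; guarded so importing stays side-effect free.
if __name__ == "__main__":
    demo_config = TimesformerConfig(num_frames=16, attention_type="joint_space_time")
    print(demo_config.hidden_size)  # 768 by default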
| 368 |
'''simple docstring'''
from sklearn.metrics import f1_score
import datasets
lowerCamelCase = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""
lowerCamelCase = """
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{'f1': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results['f1'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results['f1'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")
>>> print(round(results['f1'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'f1': array([0.8, 0. , 0. ])}
"""
lowerCamelCase = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    '''simple docstring'''

    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32')),
'references': datasets.Sequence(datasets.Value('int32')),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32'),
'references': datasets.Value('int32'),
}) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        '''simple docstring'''
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
        return {"f1": float(score) if score.size == 1 else score}
| 48 | 0 |
def is_palindrome(num: int) -> bool:
    """
    Return True if `num` reads the same forwards and backwards.

    >>> is_palindrome(121)
    True
    >>> is_palindrome(-121)
    False
    >>> is_palindrome(10)
    False
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 175 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    '''simple docstring'''

    test_head_masking = True
    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        '''simple docstring'''
        self.model_tester = FlaxRoFormerModelTester(self)
@slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
@slow
    def test_inference_masked_lm(self):
        '''simple docstring'''
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]])
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 266 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_longformer': [
'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LongformerConfig',
'LongformerOnnxConfig',
],
'tokenization_longformer': ['LongformerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongformerForMaskedLM',
'LongformerForMultipleChoice',
'LongformerForQuestionAnswering',
'LongformerForSequenceClassification',
'LongformerForTokenClassification',
'LongformerModel',
'LongformerPreTrainedModel',
'LongformerSelfAttention',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLongformerForMaskedLM',
'TFLongformerForMultipleChoice',
'TFLongformerForQuestionAnswering',
'TFLongformerForSequenceClassification',
'TFLongformerForTokenClassification',
'TFLongformerModel',
'TFLongformerPreTrainedModel',
'TFLongformerSelfAttention',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
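# Hedged usage note (added, not part of the original file): with `_LazyModule`
# installed in `sys.modules`, a statement like
#     from transformers.models.longformer import LongformerModel
# only triggers the import of `modeling_longformer` at that moment, keeping the
# top-level `import transformers` cheap.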
| 86 |
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, padding=None, truncation=None, top_k=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Assume `image` already packs image/question pairs (dict, list of dicts, generator, ...).
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation)
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 86 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n    >>> from diffusers.utils import load_image\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n    ... )\n    >>> pipe_prior.to("cuda")\n\n    >>> prompt = "A red cartoon frog, 4k"\n    >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n    >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n    ... )\n    >>> pipe.to("cuda")\n\n    >>> init_image = load_image(\n    ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n    ...     "/kandinsky/frog.png"\n    ... )\n\n    >>> image = pipe(\n    ...     image=init_image,\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=100,\n    ...     strength=0.2,\n    ... ).images\n\n    >>> image[0].save("red_frog.png")\n    ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel) -> None:
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        '''simple docstring'''
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        '''simple docstring'''
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators.")
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

        init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        '''simple docstring'''
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        '''simple docstring'''
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        '''simple docstring'''
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]], negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, strength: float = 0.3, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True):
        '''simple docstring'''
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor")

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 109 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        """simple docstring"""
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
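# Hedged usage sketch (added, not part of the original file):
#   tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   enc = tok("hello world")
#   enc["input_ids"]  # [CLS] ... [SEP] ids, lower-cased per the default config above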
| 121 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    '''simple docstring'''
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name})
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
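# Hedged usage sketch (added, not part of the original file):
#   template = download_prompt(None, agent_name="MyAgent", mode="run")
#   # fetches run_prompt_template.txt from huggingface-tools/default-prompts;
#   # passing a string that contains whitespace returns that string unchanged.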
| 202 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints() -> None:
    '''simple docstring'''
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
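# Illustrative sketch (added, not part of the original script) of what
# `_re_checkpoint` matches, using a made-up docstring line:
#   _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   # -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]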
| 202 | 1 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    '''simple docstring'''

    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels)

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
{"""feature-extraction""": TFRegNetModel, """image-classification""": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
    def test_inputs_embeds(self):
        pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
    def test_keras_fit(self):
        super().test_keras_fit()
@unittest.skip(reason='RegNet does not support input and output embeddings' )
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 2, self.model_tester.image_size // 2])

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)), msg=(
                            'Tuple and dict output are not equal. Difference:'
                            F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}'''
                        ))

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {'output_hidden_states': True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {'output_hidden_states': True})
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1E-4)
| 96 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class DetaConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(self, backbone_config=None, num_queries=900, max_position_embeddings=2048, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", num_feature_levels=5, encoder_n_points=4, decoder_n_points=4, two_stage=True, two_stage_num_proposals=300, with_box_refine=True, assign_first_stage=True, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, **kwargs):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
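# Hedged usage sketch (added, not part of the original module):
#   config = DetaConfig(num_queries=300)
#   config.num_attention_heads                         # -> 8, aliased via `attribute_map`
#   config.to_dict()["backbone_config"]["model_type"]  # -> "resnet" by default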
| 177 | 0 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(self, prediction_length: Optional[int] = None, context_length: Optional[int] = None, distribution_output: str = "student_t", loss: str = "nll", input_size: int = 1, lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7], scaling: Union[str, bool] = "mean", num_dynamic_real_features: int = 0, num_static_categorical_features: int = 0, num_static_real_features: int = 0, num_time_features: int = 0, cardinality: Optional[List[int]] = None, embedding_dimension: Optional[List[int]] = None, encoder_ffn_dim: int = 32, decoder_ffn_dim: int = 32, encoder_attention_heads: int = 2, decoder_attention_heads: int = 2, encoder_layers: int = 2, decoder_layers: int = 2, is_encoder_decoder: bool = True, activation_function: str = "gelu", d_model: int = 64, dropout: float = 0.1, encoder_layerdrop: float = 0.1, decoder_layerdrop: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, num_parallel_samples: int = 100, init_std: float = 0.02, use_cache=True, **kwargs):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
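# Worked example (added, not part of the original module) of `_number_of_features`:
# with the defaults plus num_static_categorical_features=1 and cardinality=[10],
# embedding_dimension becomes [min(50, (10 + 1) // 2)] = [5], so
#   _number_of_features = 5 + 0 (dynamic real) + 0 (time) + 0 (static real) + 1 * 2 = 7
# and feature_size = input_size * len(lags_sequence) + 7 = 1 * 7 + 7 = 14.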
| 141 |
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"""{file}_{class_name}_{test_name}"""
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"""class {class_name}("""
    test_regex = f"""{4 * ' '}def {test_name}("""
    line_begin_regex = f"""{8 * ' '}{correct_line.split()[0]}"""
    another_line_begin_regex = f"""{16 * ' '}{correct_line.split()[0]}"""
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0

    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"""{spaces * ' '}{correct_line}""")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--correct_filename', help='filename of tests with expected result')
parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
    args = parser.parse_args()
main(args.correct_filename, args.fail_filename)
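# Illustrative note (added, not part of the original script): each line of
# --correct_filename is expected to be semicolon-separated, e.g.
#   tests/models/foo/test_modeling_foo.py;FooIntegrationTest;test_inference;expected_slice = tf.constant([...])
# (a hypothetical path/class/test); the fourth field is the replacement line
# spliced into the named test at the matching indentation.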
| 141 | 1 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaPriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
@property
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def SCREAMING_SNAKE_CASE__ ( self ):
torch.manual_seed(0 )
a :Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_lowerCamelCase )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
torch.manual_seed(0 )
a :int = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 12,
'''embedding_dim''': self.text_embedder_hidden_size,
'''num_layers''': 1,
}
a :str = PriorTransformer(**_lowerCamelCase )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
a :str = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def SCREAMING_SNAKE_CASE__ ( self ):
torch.manual_seed(0 )
a :Tuple = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
a :Tuple = CLIPVisionModelWithProjection(_lowerCamelCase )
return model
@property
def SCREAMING_SNAKE_CASE__ ( self ):
a :Any = CLIPImageProcessor(
crop_size=224 , do_center_crop=_lowerCamelCase , do_normalize=_lowerCamelCase , do_resize=_lowerCamelCase , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=224 , )
return image_processor
def SCREAMING_SNAKE_CASE__ ( self ):
a :Tuple = self.dummy_prior
a :int = self.dummy_image_encoder
a :Any = self.dummy_text_encoder
a :List[str] = self.dummy_tokenizer
a :Union[str, Any] = self.dummy_image_processor
a :List[Any] = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=_lowerCamelCase , clip_sample_range=10.0 , )
a :Optional[Any] = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''scheduler''': scheduler,
'''image_processor''': image_processor,
}
return components
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase=0 ):
if str(_lowerCamelCase ).startswith('''mps''' ):
a :str = torch.manual_seed(_lowerCamelCase )
else:
a :Any = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
a :List[str] = {
'''prompt''': '''horse''',
'''generator''': generator,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self ):
a :int = '''cpu'''
a :Tuple = self.get_dummy_components()
a :Optional[int] = self.pipeline_class(**_lowerCamelCase )
a :Dict = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
a :Optional[Any] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
a :Optional[Any] = output.image_embeds
a :Union[str, Any] = pipe(
**self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0]
a :Tuple = image[0, -10:]
a :int = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
a :Optional[int] = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[str] = torch_device == '''cpu'''
a :Union[str, Any] = True
a :int = False
self._test_inference_batch_single_identical(
test_max_difference=_lowerCamelCase , relax_max_difference=_lowerCamelCase , test_mean_pixel_difference=_lowerCamelCase , )
@skip_mps
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[str] = torch_device == '''cpu'''
a :Union[str, Any] = False
self._test_attention_slicing_forward_pass(
test_max_difference=_lowerCamelCase , test_mean_pixel_difference=_lowerCamelCase , )
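A hedged note on running this test; the file path is an assumption about the diffusers repo layout:

# python -m pytest tests/pipelines/kandinsky_v22/test_kandinsky_prior.py -k test_kandinsky_prior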
| 94 |
def solution(limit: int = 1_000_000) -> int:
    """
    Project Euler problem 72: count the reduced proper fractions n/d with
    d <= limit, which equals the sum of Euler's totient phi(n) for 2 <= n <= limit.
    """
    # Sieve of Eratosthenes over the odd numbers, plus 2.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    # phi[n] starts at n and is multiplied by (1 - 1/p) for each prime p dividing n.
    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
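A small worked check: for limit = 8 the totients are phi(2..8) = 1, 2, 2, 4, 2, 6, 4, which sum to 21 — the 21 reduced proper fractions with d <= 8 from the Project Euler 72 statement:

# Sanity check against the known d <= 8 example:
assert solution(8) == 21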
| 94 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]


if TYPE_CHECKING:
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
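A brief usage note: the `_LazyModule` indirection defers the heavy torch/TF imports until a symbol is first touched. A hedged sketch (checkpoint name is illustrative):

# Importing the package stays cheap; the real modeling module loads on first use.
from transformers import RagTokenizer  # resolved lazily through _LazyModule
tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")  # triggers the actual import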
| 138 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    """Output of the scheduler's `step` function: the previous sample and the model's x_0 estimate."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """
    Create a beta schedule that discretizes the given alpha_bar function, which defines the
    cumulative product of (1 - beta) over time from t in [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
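# Note (restating the code above, not new behavior): with the "cosine" transform,
# alpha_bar(s) = cos(((s + 0.008) / 1.008) * pi / 2) ** 2, so each
# beta_i = 1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), clipped at max_beta = 0.999.
# For example, betas_for_alpha_bar(1000) returns a length-1000 float32 tensor whose
# values grow from near 0 toward the 0.999 clip as i approaches T.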
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # UnCLIP does not rescale model inputs; kept for scheduler-interface compatibility.
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        # Space the timesteps evenly from 0 to num_train_timesteps - 1, inclusive on both ends.
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance  # _get_variance already returned the standard deviation here
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
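A minimal usage sketch of the scheduler, under stated assumptions: `prior_model` is a hypothetical callable that predicts x_0 directly (matching `prediction_type="sample"`, as the Kandinsky prior test above configures it), and the latent shape is illustrative:

scheduler = UnCLIPScheduler(variance_type="fixed_small_log", prediction_type="sample", clip_sample_range=10.0)
scheduler.set_timesteps(25, device="cpu")

latents = torch.randn(1, 32)
for i, t in enumerate(scheduler.timesteps):
    model_output = prior_model(latents, t)  # hypothetical model call
    prev_timestep = None if i + 1 == len(scheduler.timesteps) else scheduler.timesteps[i + 1]
    latents = scheduler.step(model_output, t, latents, prev_timestep=prev_timestep).prev_sample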
| 138 | 1 |