import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string into a list of single characters."""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
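
# --- usage sketch (added, not part of the original module) ---
# A minimal illustration of how this character-level tokenizer behaves. It
# assumes a local vocab.json such as {"[GO]": 0, "[s]": 1, "a": 2, "b": 3};
# the file name and vocabulary contents here are hypothetical.
#
#   tokenizer = MgpstrTokenizer("vocab.json")
#   tokenizer._tokenize("ab")            # -> ["a", "b"], one token per character
#   tokenizer._convert_token_to_id("a")  # -> 2
#   tokenizer._convert_token_to_id("z")  # -> id of the "[GO]" unk token
#   tokenizer.save_vocabulary(".")       # -> ("./vocab.json",)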
import re
import string

import numpy as np

import datasets


_DESCRIPTION = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''

_KWARGS_DESCRIPTION = '''
Args:
    predictions: List of predicted texts.
    references: List of reference texts.
    regexes_to_ignore: List, defaults to None. Regex expressions of characters to
        ignore when calculating the exact matches. Note: these regexes are removed
        from the input data before the changes based on the options below (e.g. ignore_case,
        ignore_punctuation, ignore_numbers) are applied.
    ignore_case: Boolean, defaults to False. If true, turns everything
        to lowercase so that capitalization differences are ignored.
    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    25.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    50.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    75.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
    >>> print(round(results["exact_match"], 1))
    100.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    33.3
'''

_CITATION = '''
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass the fp16 variant but provide only the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass the fp16 variant but provide only the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
"""simple docstring"""
from __future__ import annotations
from math import gcd
def _lowercase ( __snake_case ,__snake_case = 2 ,__snake_case = 1 ,__snake_case = 3 ,) -> int | None:
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError("The input value cannot be less than 2" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(__snake_case ,__snake_case ,__snake_case ) -> int:
return (pow(__snake_case ,2 ) + step) % modulus
for _ in range(__snake_case ):
# These track the position within the cycle detection logic.
__lowerCAmelCase : Union[str, Any] = seed
__lowerCAmelCase : Optional[Any] = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
__lowerCAmelCase : Tuple = rand_fn(__snake_case ,__snake_case ,__snake_case )
__lowerCAmelCase : Any = rand_fn(__snake_case ,__snake_case ,__snake_case )
__lowerCAmelCase : List[Any] = rand_fn(__snake_case ,__snake_case ,__snake_case )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
__lowerCAmelCase : List[Any] = gcd(hare - tortoise ,__snake_case )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
__lowerCAmelCase : str = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
__snake_case : Any = argparse.ArgumentParser()
parser.add_argument(
'num',
type=int,
help='The value to find a divisor of',
)
parser.add_argument(
'--attempts',
type=int,
default=3,
help='The number of attempts before giving up',
)
__snake_case : List[str] = parser.parse_args()
__snake_case : List[Any] = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"""{args.num} is probably prime""")
else:
__snake_case : Any = args.num // divisor
print(F"""{args.num} = {divisor} * {quotient}""") | 58 | 0 |
import warnings


warnings.warn(
    "memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: "
    "`from accelerate import find_executable_batch_size` to avoid this warning.",
    FutureWarning,
)
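
# --- usage sketch (added) ---
# The deprecation message above points at the replacement API. A minimal,
# hedged example of how `find_executable_batch_size` is typically used: as a
# decorator that retries the wrapped function with a smaller batch size when
# an out-of-memory error is raised. The training-loop body is a placeholder.
#
#   from accelerate import find_executable_batch_size
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # build dataloaders with `batch_size` and run the loop
#
#   train()  # retries with smaller batch sizes on OOM until the call succeeds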
import gc
import unittest

import numpy as np
import torch

from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
    CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
import copy
import inspect
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
        VideoMAEForPreTraining,
        VideoMAEForVideoClassification,
        VideoMAEModel,
    )
    from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from transformers import VideoMAEImageProcessor


class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_outputs_equivalence(self):
        pass


def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
        )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device
        )

        with torch.no_grad():
            outputs = model(**inputs)

        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
from __future__ import annotations

import os
import tempfile
import unittest

from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFConvBertForMaskedLM,
        TFConvBertForMultipleChoice,
        TFConvBertForQuestionAnswering,
        TFConvBertForSequenceClassification,
        TFConvBertForTokenClassification,
        TFConvBertModel,
    )


class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)


@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
import numpy as np
import skfuzzy as fuzz


if __name__ == "__main__":
    # Create universe of discourse in Python using linspace()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
yield prime
prime += 1
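# The generator above is an incremental Sieve of Eratosthenes: factor_map maps
# each upcoming composite to one of its prime factors, so memory grows with the
# number of primes yielded rather than with a fixed upper bound.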
def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
n += 2
if __name__ == "__main__":
print(solution()) | 8 | 0 |
def equated_monthly_installments(
    principal: float, rate_per_annum: float, years_to_repay: int
) -> float:
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
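# The closed form above is EMI = P * r * (1 + r)**N / ((1 + r)**N - 1), where
# r is the monthly interest rate and N the number of monthly payments.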
if __name__ == "__main__":
import doctest
doctest.testmod()
| 176 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 176 | 1 |
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
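# This evaluates the Euler integral gamma(num) = ∫_0^∞ x**(num - 1) * e**(-x) dx
# numerically with scipy.integrate.quad over the half-open interval [0, inf).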
if __name__ == "__main__":
from doctest import testmod
testmod()
| 61 |
"""simple docstring"""
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
return explored
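# Iterative DFS visits each vertex and edge once, i.e. O(V + E) time; the
# adjacency lists are reversed before pushing so the visit order matches the
# recursive formulation.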
G = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A'))
| 61 | 1 |
"""simple docstring"""
def solution(n: int = 1000) -> int:
'''simple docstring'''
return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1 ) )
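# Project Euler 120: for a >= 3 the maximal remainder of (a - 1)**n + (a + 1)**n
# modulo a**2 is 2 * a * ((a - 1) // 2), so the answer is simply that sum.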
if __name__ == "__main__":
print(solution())
| 363 |
"""simple docstring"""
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a
def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
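# Both variants rely on the invariant gcd(a, b) == gcd(b, a % b), terminating
# when the second argument reaches zero.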
def main():
'''simple docstring'''
print(F"""euclidean_gcd(3, 5) = {euclidean_gcd(3, 5 )}""" )
print(F"""euclidean_gcd(5, 3) = {euclidean_gcd(5, 3 )}""" )
print(F"""euclidean_gcd(1, 3) = {euclidean_gcd(1, 3 )}""" )
print(F"""euclidean_gcd(3, 6) = {euclidean_gcd(3, 6 )}""" )
print(F"""euclidean_gcd(6, 3) = {euclidean_gcd(6, 3 )}""" )
print(F"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5 )}""" )
print(F"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3 )}""" )
print(F"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3 )}""" )
print(F"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6 )}""" )
print(F"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3 )}""" )
if __name__ == "__main__":
main()
| 205 | 0 |
'''simple docstring'''
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override this method in a subclass.
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
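# PushRelabelExecutor implements the Goldberg-Tarjan push-relabel algorithm
# with the relabel-to-front selection rule, which bounds the running time by
# O(V^3) on a graph with V vertices.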
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
    maximum_flow = flow_network.find_maximum_flow()
print(F'maximum flow is {maximum_flow}')
| 31 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1_114_112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        return ()
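# CANINE has no vocabulary file: _tokenize splits text into individual
# characters and _convert_token_to_id maps each one to its Unicode codepoint,
# which is why save_vocabulary returns an empty tuple.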
| 278 | 0 |
'''simple docstring'''
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    '''simple docstring'''
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    '''simple docstring'''
    cipher = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher += dict2[x]
    return cipher


def original_text(cipher: str, key_new: str) -> str:
    '''simple docstring'''
    or_txt = ""
    i = 0
    for letter in cipher:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    '''simple docstring'''
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 91 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"
}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
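# OpenAIGPTConfig() with no arguments reproduces the original openai-gpt
# checkpoint's architecture: 12 layers, 12 heads, 768 hidden units and 512
# positions.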
| 91 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
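# Example invocation (all paths are illustrative):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./rembert/model.ckpt \
#       --rembert_config_file ./rembert/config.json \
#       --pytorch_dump_path ./rembert/pytorch_model.bin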
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 120 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    '''simple docstring'''
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
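# Project Euler 29: the set removes duplicate values such as 2**4 == 4**2, so
# the result is the count of distinct a**b for 2 <= a, b <= n.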
if __name__ == "__main__":
print("Number of terms ", solution(int(str(input()).strip())))
| 120 | 1 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def update_versions(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    update_versions(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    update_versions(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
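# Typical workflow (script path illustrative, e.g. utils/release.py):
#   python utils/release.py                 # bump to the release version
#   python utils/release.py --post_release  # move back to a .dev0 version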
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 346 |
'''simple docstring'''
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells

    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cell before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway[0])

    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells

        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
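# Nagel-Schreckenberg rules applied each step: accelerate by one up to
# max_speed, brake down to the free gap ahead, randomly slow down with the
# given probability, then move every car by its speed around the circular road.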
if __name__ == "__main__":
import doctest
doctest.testmod()
| 346 | 1 |
def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
return None
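# Jaccard similarity J(A, B) = |A intersection B| / |A union B|; with
# alternative_union the denominator becomes |A| + |B| instead of the size of
# the true union.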
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
| 50 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/realm-cc-news-pretrained-embedder": 512,
    "google/realm-cc-news-pretrained-encoder": 512,
    "google/realm-cc-news-pretrained-scorer": 512,
    "google/realm-cc-news-pretrained-openqa": 512,
    "google/realm-orqa-nq-openqa": 512,
    "google/realm-orqa-nq-reader": 512,
    "google/realm-orqa-wq-openqa": 512,
    "google/realm-orqa-wq-reader": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
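# batch_encode_candidates encodes one list of candidate strings per example,
# always padding to max_length so the per-example candidate encodings can be
# stacked into tensors of identical shape.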
| 50 | 1 |
'''simple docstring'''
import string
from math import logaa
def term_frequency(term: str, document: str) -> int:
    """simple docstring"""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])
def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """simple docstring"""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))
def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
"""simple docstring"""
if smoothing:
if n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('''df must be > 0''' )
elif n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(logaa(n / df ) , 3 )
def tf_idf(tf: int, idf: int) -> float:
"""simple docstring"""
return round(tf * idf , 3 )
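# idf is computed as log10(n / df), or 1 + log10(n / (1 + df)) when smoothing
# is enabled; tf-idf is then the plain product tf * idf.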
| 359 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
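    # The FileLock around the cache means that, in distributed training, only
    # the first process converts examples to features; all other ranks block
    # and then load the cached tensors from disk.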
def __len__( self ):
"""simple docstring"""
return len(self.features )
    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
return inputs
| 6 | 0 |
"""simple docstring"""
def is_even(number: int) -> bool:
    '''simple docstring'''
    return number & 1 == 0
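# number & 1 isolates the least-significant bit, which is 0 exactly when the
# number is even, so no division or modulo is needed.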
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 |
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved
def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
return False
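# Classic backtracking: mark the current cell in `solutions`, recurse in all
# four directions, and reset the cell on failure so other paths can reuse it.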
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 364 |
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
model = VideoMAEForVideoClassification.from_pretrained("""MCG-NJU/videomae-base-finetuned-kinetics""" ).to(
torch_device )
image_processor = self.default_image_processor
video = prepare_video()
inputs = image_processor(video , return_tensors="""pt""" ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
# verify the logits
expected_shape = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@slow
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
model = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" ).to(torch_device )
image_processor = self.default_image_processor
video = prepare_video()
inputs = image_processor(video , return_tensors="""pt""" ).to(torch_device )
# add boolean mask, indicating which patches to mask
local_path = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
inputs["""bool_masked_pos"""] = torch.load(local_path )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
# verify the logits
expected_shape = torch.Size([1, 14_08, 15_36] )
expected_slice = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=torch_device )
self.assertEqual(outputs.logits.shape , expected_shape )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
expected_loss = torch.tensor([0.5_142] , device=torch_device )
self.assertTrue(torch.allclose(outputs.loss , expected_loss , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
model = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" , norm_pix_loss=False ).to(
torch_device )
with torch.no_grad():
outputs = model(**inputs )
expected_loss = torch.tensor([0.6_469] , device=torch_device )
self.assertTrue(torch.allclose(outputs.loss , expected_loss , atol=1e-4 ) )
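# --- Hedged usage sketch (editor addition, not part of the original test file). ---
# A standalone version of the classification integration test above; the
# checkpoint and classes are the ones the test itself uses. Runs on CPU.
if __name__ == "__main__":
    from transformers import VideoMAEForVideoClassification, VideoMAEImageProcessor

    processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
    inputs = processor(prepare_video(), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[int(logits.argmax(-1))])  # predicted Kinetics-400 label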
| 69 | 0 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig( PretrainedConfig ):
"""simple docstring"""
model_type = """detr"""
keys_to_ignore_at_inference = ["""past_key_values"""]
attribute_map = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Tuple , __snake_case : Any=True , __snake_case : int=None , __snake_case : Dict=3 , __snake_case : Optional[Any]=100 , __snake_case : str=6 , __snake_case : Tuple=2048 , __snake_case : int=8 , __snake_case : List[Any]=6 , __snake_case : Optional[int]=2048 , __snake_case : Tuple=8 , __snake_case : Tuple=0.0 , __snake_case : Union[str, Any]=0.0 , __snake_case : str=True , __snake_case : Tuple="relu" , __snake_case : Optional[Any]=256 , __snake_case : Optional[Any]=0.1 , __snake_case : Dict=0.0 , __snake_case : Any=0.0 , __snake_case : Union[str, Any]=0.02 , __snake_case : Tuple=1.0 , __snake_case : Optional[int]=False , __snake_case : Union[str, Any]="sine" , __snake_case : Optional[Any]="resnet50" , __snake_case : str=True , __snake_case : List[Any]=False , __snake_case : Tuple=1 , __snake_case : Union[str, Any]=5 , __snake_case : Optional[int]=2 , __snake_case : int=1 , __snake_case : Optional[int]=1 , __snake_case : Union[str, Any]=5 , __snake_case : Any=2 , __snake_case : Optional[int]=0.1 , **__snake_case : Any , ) -> Any:
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
UpperCAmelCase : Optional[Any] = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(__snake_case , __snake_case ):
UpperCAmelCase : Union[str, Any] = backbone_config.get('''model_type''' )
UpperCAmelCase : List[Any] = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase : int = config_class.from_dict(__snake_case )
# set timm attributes to None
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = None, None, None
UpperCAmelCase : Optional[Any] = use_timm_backbone
UpperCAmelCase : Union[str, Any] = backbone_config
UpperCAmelCase : List[str] = num_channels
UpperCAmelCase : Optional[Any] = num_queries
UpperCAmelCase : str = d_model
UpperCAmelCase : List[Any] = encoder_ffn_dim
UpperCAmelCase : Tuple = encoder_layers
UpperCAmelCase : str = encoder_attention_heads
UpperCAmelCase : List[Any] = decoder_ffn_dim
UpperCAmelCase : List[Any] = decoder_layers
UpperCAmelCase : List[str] = decoder_attention_heads
UpperCAmelCase : List[str] = dropout
UpperCAmelCase : Union[str, Any] = attention_dropout
UpperCAmelCase : int = activation_dropout
UpperCAmelCase : Union[str, Any] = activation_function
UpperCAmelCase : Any = init_std
UpperCAmelCase : List[str] = init_xavier_std
UpperCAmelCase : Dict = encoder_layerdrop
UpperCAmelCase : Optional[int] = decoder_layerdrop
UpperCAmelCase : Optional[int] = encoder_layers
UpperCAmelCase : Tuple = auxiliary_loss
UpperCAmelCase : Union[str, Any] = position_embedding_type
UpperCAmelCase : Union[str, Any] = backbone
UpperCAmelCase : Optional[int] = use_pretrained_backbone
UpperCAmelCase : Optional[Any] = dilation
# Hungarian matcher
UpperCAmelCase : Optional[int] = class_cost
UpperCAmelCase : Optional[int] = bbox_cost
UpperCAmelCase : Tuple = giou_cost
# Loss coefficients
UpperCAmelCase : List[str] = mask_loss_coefficient
UpperCAmelCase : Union[str, Any] = dice_loss_coefficient
UpperCAmelCase : Optional[Any] = bbox_loss_coefficient
UpperCAmelCase : Tuple = giou_loss_coefficient
UpperCAmelCase : Dict = eos_coefficient
super().__init__(is_encoder_decoder=__snake_case , **__snake_case )
@property
def A ( self : Optional[Any] ) -> int:
return self.encoder_attention_heads
@property
def A ( self : int ) -> int:
return self.d_model
@classmethod
def A ( cls : List[Any] , __snake_case : PretrainedConfig , **__snake_case : str ) -> str:
return cls(backbone_config=__snake_case , **__snake_case )
def to_dict( self ) -> Dict[str, any]:
    output = copy.deepcopy(self.__dict__ )
    if output["backbone_config"] is not None:
        output["""backbone_config"""] = self.backbone_config.to_dict()
    output["""model_type"""] = self.__class__.model_type
return output
class DetrOnnxConfig( OnnxConfig ):
"""simple docstring"""
torch_onnx_minimum_version = version.parse("""1.11""" )
@property
def A ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def A ( self : List[str] ) -> float:
return 1E-5
@property
def A ( self : str ) -> int:
return 12
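# --- Hedged usage sketch (editor addition). ---
# The classes above mirror `DetrConfig`/`DetrOnnxConfig` from upstream
# `transformers`; this sketch uses the upstream class to sidestep the renamed
# local identifiers.
if __name__ == "__main__":
    from transformers import DetrConfig as UpstreamDetrConfig

    config = UpstreamDetrConfig(num_queries=50, d_model=128)
    assert config.hidden_size == 128  # `attribute_map` aliases hidden_size -> d_model
    print(config.to_dict()["model_type"])  # "detr"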
| 23 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def setUp(self):
    self.tmpdirname = tempfile.mkdtemp()
    image_processor = BlipImageProcessor()
    tokenizer = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""")
    processor = BlipProcessor(image_processor, tokenizer)
    processor.save_pretrained(self.tmpdirname)

def get_tokenizer(self, **kwargs):
    return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

def get_image_processor(self, **kwargs):
    return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

def tearDown(self):
    shutil.rmtree(self.tmpdirname)

def prepare_image_inputs(self):
    image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
    image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
    return image_inputs

def test_save_load_pretrained_additional_features(self):
    processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
    processor.save_pretrained(self.tmpdirname)
    tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""")
    image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
    processor = BlipProcessor.from_pretrained(
        self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=False, padding_value=1.0)
    self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
    self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
    self.assertIsInstance(processor.image_processor, BlipImageProcessor)

def test_image_processor(self):
    image_processor = self.get_image_processor()
    tokenizer = self.get_tokenizer()
    processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
    image_input = self.prepare_image_inputs()
    input_feat_extract = image_processor(image_input, return_tensors="""np""")
    input_processor = processor(images=image_input, return_tensors="""np""")
    for key in input_feat_extract.keys():
        self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

def test_tokenizer(self):
    image_processor = self.get_image_processor()
    tokenizer = self.get_tokenizer()
    processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
    input_str = """lower newer"""
    encoded_processor = processor(text=input_str)
    encoded_tok = tokenizer(input_str, return_token_type_ids=False)
    for key in encoded_tok.keys():
        self.assertListEqual(encoded_tok[key], encoded_processor[key])

def test_processor(self):
    image_processor = self.get_image_processor()
    tokenizer = self.get_tokenizer()
    processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
    input_str = """lower newer"""
    image_input = self.prepare_image_inputs()
    inputs = processor(text=input_str, images=image_input)
    self.assertListEqual(list(inputs.keys()), ["""pixel_values""", """input_ids""", """attention_mask"""])
    # test if it raises when no input is passed
    with pytest.raises(ValueError):
        processor()

def test_tokenizer_decode(self):
    image_processor = self.get_image_processor()
    tokenizer = self.get_tokenizer()
    processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
    predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
    decoded_processor = processor.batch_decode(predicted_ids)
    decoded_tok = tokenizer.batch_decode(predicted_ids)
    self.assertListEqual(decoded_tok, decoded_processor)

def test_model_input_names(self):
    image_processor = self.get_image_processor()
    tokenizer = self.get_tokenizer()
    processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
    input_str = """lower newer"""
    image_input = self.prepare_image_inputs()
    inputs = processor(text=input_str, images=image_input)
    # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
    self.assertListEqual(list(inputs.keys()), ["""pixel_values""", """input_ids""", """attention_mask"""])
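# --- Hedged usage sketch (editor addition). ---
# The same processor wiring as the tests above, outside the unittest harness.
if __name__ == "__main__":
    tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")
    processor = BlipProcessor(image_processor=BlipImageProcessor(), tokenizer=tokenizer)
    image = Image.fromarray(np.zeros((30, 400, 3), dtype=np.uint8))
    batch = processor(text="lower newer", images=image, return_tensors="pt")
    print(sorted(batch.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']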
| 155 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class _snake_case ( PretrainedConfig ):
model_type = """unispeech"""
def __init__( self , _lowerCamelCase=32 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-5 , _lowerCamelCase="group" , _lowerCamelCase="gelu" , _lowerCamelCase=(512, 512, 512, 512, 512, 512, 512) , _lowerCamelCase=(5, 2, 2, 2, 2, 2, 2) , _lowerCamelCase=(10, 3, 3, 3, 3, 2, 2) , _lowerCamelCase=False , _lowerCamelCase=128 , _lowerCamelCase=16 , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=0.05 , _lowerCamelCase=10 , _lowerCamelCase=2 , _lowerCamelCase=0.0 , _lowerCamelCase=10 , _lowerCamelCase=0 , _lowerCamelCase=320 , _lowerCamelCase=2 , _lowerCamelCase=0.1 , _lowerCamelCase=100 , _lowerCamelCase=256 , _lowerCamelCase=256 , _lowerCamelCase=0.1 , _lowerCamelCase="mean" , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=256 , _lowerCamelCase=80 , _lowerCamelCase=0 , _lowerCamelCase=1 , _lowerCamelCase=2 , _lowerCamelCase=0.5 , **_lowerCamelCase , ):
super().__init__(**_lowerCamelCase , pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase)
UpperCAmelCase__ : Optional[Any] = hidden_size
UpperCAmelCase__ : List[str] = feat_extract_norm
UpperCAmelCase__ : Dict = feat_extract_activation
UpperCAmelCase__ : List[str] = list(_lowerCamelCase)
UpperCAmelCase__ : List[str] = list(_lowerCamelCase)
UpperCAmelCase__ : Any = list(_lowerCamelCase)
UpperCAmelCase__ : Optional[Any] = conv_bias
UpperCAmelCase__ : List[Any] = num_conv_pos_embeddings
UpperCAmelCase__ : Dict = num_conv_pos_embedding_groups
UpperCAmelCase__ : Any = len(self.conv_dim)
UpperCAmelCase__ : Any = num_hidden_layers
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : Any = hidden_act
UpperCAmelCase__ : Any = num_attention_heads
UpperCAmelCase__ : Optional[Any] = hidden_dropout
UpperCAmelCase__ : Optional[Any] = attention_dropout
UpperCAmelCase__ : str = activation_dropout
UpperCAmelCase__ : List[str] = feat_proj_dropout
UpperCAmelCase__ : int = final_dropout
UpperCAmelCase__ : Optional[int] = layerdrop
UpperCAmelCase__ : Dict = layer_norm_eps
UpperCAmelCase__ : Dict = initializer_range
UpperCAmelCase__ : Optional[int] = num_ctc_classes
UpperCAmelCase__ : List[str] = vocab_size
UpperCAmelCase__ : Optional[Any] = do_stable_layer_norm
UpperCAmelCase__ : List[str] = use_weighted_layer_sum
UpperCAmelCase__ : Any = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase__ : str = apply_spec_augment
UpperCAmelCase__ : Tuple = mask_time_prob
UpperCAmelCase__ : Dict = mask_time_length
UpperCAmelCase__ : Tuple = mask_time_min_masks
UpperCAmelCase__ : List[Any] = mask_feature_prob
UpperCAmelCase__ : Tuple = mask_feature_length
UpperCAmelCase__ : Optional[int] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCAmelCase__ : List[Any] = num_codevectors_per_group
UpperCAmelCase__ : Optional[int] = num_codevector_groups
UpperCAmelCase__ : str = contrastive_logits_temperature
UpperCAmelCase__ : Tuple = feat_quantizer_dropout
UpperCAmelCase__ : Dict = num_negatives
UpperCAmelCase__ : Dict = codevector_dim
UpperCAmelCase__ : Union[str, Any] = proj_codevector_dim
UpperCAmelCase__ : int = diversity_loss_weight
# ctc loss
UpperCAmelCase__ : Union[str, Any] = ctc_loss_reduction
UpperCAmelCase__ : Optional[Any] = ctc_zero_infinity
# pretraining loss
UpperCAmelCase__ : str = replace_prob
@property
def snake_case__ ( self):
return functools.reduce(operator.mul , self.conv_stride , 1)
| 368 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput( BaseOutput ):
    latents: torch.FloatTensor


class VQModel( ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , in_channels = 3 , out_channels = 3 , down_block_types = ("DownEncoderBlock2D",) , up_block_types = ("UpDecoderBlock2D",) , block_out_channels = (64,) , layers_per_block = 1 , act_fn = "silu" , latent_channels = 3 , sample_size = 32 , num_vq_embeddings = 256 , norm_num_groups = 32 , vq_embed_dim = None , scaling_factor = 0.18215 , norm_type = "group" , ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=False , )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels , vq_embed_dim , 1)
        self.quantize = VectorQuantizer(num_vq_embeddings , vq_embed_dim , beta=0.25 , remap=None , sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim , latent_channels , 1)
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , norm_type=norm_type , )

    @apply_forward_hook
    def encode( self , x , return_dict = True):
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode( self , h , force_not_quantize = False , return_dict = True):
        # also go through quantization layer
        if not force_not_quantize:
            quant , emb_loss , info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2 , quant if self.config.norm_type == """spatial""" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward( self , sample , return_dict = True):
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
| 283 | 0 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters( model )-> int:
    '''simple docstring'''
    model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback( output_dir , metric )-> ModelCheckpoint:
    '''simple docstring'''
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
            " function." )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=F'''val_{metric}''' , mode="max" , save_top_k=1 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback( metric , patience )-> EarlyStopping:
    '''simple docstring'''
    return EarlyStopping(
        monitor=F'''val_{metric}''' , mode="min" if "loss" in metric else "max" , patience=patience , verbose=True , )
class Seq2SeqLoggingCallback( pl.Callback ):
    """simple docstring"""

    def on_batch_end( self , trainer , pl_module ):
        lrs = {f'''lr_group_{i}''': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs( self , trainer: pl.Trainer , pl_module: pl.LightningModule , type_path: str , save_generations=True):
        logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''')
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f'''{type_path}_results/{trainer.global_step:05d}.txt'''
            generations_file = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt'''
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file , "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor):
                    val = val.item()
                msg = f'''{key}: {val:.6f}\n'''
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start( self , trainer , pl_module ):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end( self , trainer: pl.Trainer , pl_module: pl.LightningModule ):
        save_json(pl_module.metrics , pl_module.metrics_save_path)
        return self._write_logs(trainer , pl_module , "test")

    @rank_zero_only
    def on_validation_end( self , trainer: pl.Trainer , pl_module ):
        save_json(pl_module.metrics , pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 40 |
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100 )
def partition(number_to_partition: int ) -> set[int]:
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}
    ret: set[int] = set()
    prime: int
    sub: int
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime ):
            ret.add(sub * prime )
    return ret
def solution(number_unique_partitions: int = 5000 ) -> int | None:
    for number_to_partition in range(1 , NUM_PRIMES ):
        if len(partition(number_to_partition ) ) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(f"""{solution() = }""")
| 52 | 0 |
'''simple docstring'''
def is_isogram( string: str ) -> bool:
    '''simple docstring'''
    if not all(x.isalpha() for x in string ):
        raise ValueError("""String must only contain alphabetic characters.""" )
    letters = sorted(string.lower() )
    return len(letters ) == len(set(letters ) )
if __name__ == "__main__":
UpperCAmelCase_ = input('Enter a string ').strip()
UpperCAmelCase_ = is_isogram(input_str)
print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 61 |
'''simple docstring'''
def depth_first_search( grid: list[list[int]] , row: int , col: int , visit: set ) -> int:
    '''simple docstring'''
    row_length , col_length = len(grid ), len(grid[0] )
    if (
        min(row , col ) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col) )
    count = 0
    count += depth_first_search(grid , row + 1 , col , visit )
    count += depth_first_search(grid , row - 1 , col , visit )
    count += depth_first_search(grid , row , col + 1 , visit )
    count += depth_first_search(grid , row , col - 1 , visit )
    visit.remove((row, col) )
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
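    # Hedged usage example (editor addition): 1 marks a blocked cell; counts
    # the simple paths from the top-left to the bottom-right corner.
    grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
    print(depth_first_search(grid, 0, 0, set()))  # 2 paths around the obstacle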
| 61 | 1 |
from math import ceil
def assert_device_map( device_map , num_blocks ):
    """simple docstring"""
    blocks = list(range(0 , num_blocks ) )
    device_map_blocks = [item for sublist in list(device_map.values() ) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i ) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i )
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks ) != 0:
        raise ValueError(
            """Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
            """ These attention blocks were specified more than once: """ + str(duplicate_blocks ) )
    if len(missing_blocks ) != 0:
        raise ValueError(
            """There are attention blocks for this model that are not specified in the device_map. Add these attention """
            """blocks to a device on the device_map: """ + str(missing_blocks ) )
    if len(extra_blocks ) != 0:
        raise ValueError(
            """The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
            + str(extra_blocks ) )


def get_device_map( n_layers , devices ):
    """simple docstring"""
    layers = list(range(n_layers ) )
    n_blocks = int(ceil(n_layers / len(devices ) ) )
    layers_list = [layers[i : i + n_blocks] for i in range(0 , n_layers , n_blocks )]
    return dict(zip(devices , layers_list ) )
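# --- Hedged usage example (editor addition). ---
# Splits 24 attention blocks evenly across two devices, then validates the map.
if __name__ == "__main__":
    device_map = get_device_map(24, devices=[0, 1])
    print(device_map)  # {0: [0, ..., 11], 1: [12, ..., 23]}
    assert_device_map(device_map, 24)  # raises ValueError on a bad map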
| 116 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    '''simple docstring'''

    def __init__(self, initial_learning_rate, decay_schedule_fn, warmup_steps, power=1.0, name=None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(init_lr, num_train_steps, num_warmup_steps, min_lr_ratio=0.0, adam_beta1=0.9, adam_beta2=0.999, adam_epsilon=1e-8, adam_clipnorm=None, adam_global_clipnorm=None, weight_decay_rate=0.0, power=1.0, include_in_weight_decay=None):
    """simple docstring"""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=power)
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps)
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], include_in_weight_decay=include_in_weight_decay)
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm)
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam):
    '''simple docstring'''

    def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=False, weight_decay_rate=0.0, include_in_weight_decay=None, exclude_from_weight_decay=None, name="AdamWeightDecay", **kwargs):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate")

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"], use_locking=self._use_locking)
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    '''simple docstring'''

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
        return self._accum_steps.value()

    @property
    def gradients(self):
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ])
        if len(gradients) != len(self._gradients):
            raise ValueError(f'''Expected {len(self._gradients)} gradients, but got {len(gradients)}''')
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
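# --- Hedged usage sketch (editor addition). ---
# This module mirrors `transformers.optimization_tf`; the same warmup schedule
# and AdamWeightDecay combo is exposed upstream as `create_optimizer`.
if __name__ == "__main__":
    optimizer, lr_schedule = create_optimizer(
        init_lr=5e-5,
        num_train_steps=1000,
        num_warmup_steps=100,
        weight_decay_rate=0.01,
    )
    print(float(lr_schedule(50)))  # mid-warmup learning rate, roughly 2.5e-5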
| 116 | 1 |
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/maskformer-swin-base-ade''': (
'''https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'''
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig( PretrainedConfig ):
model_type = "maskformer"
attribute_map = {"hidden_size": "mask_feature_size"}
backbones_supported = ["resnet", "swin"]
decoders_supported = ["detr"]
def __init__( self : Dict , snake_case__ : int = 2_5_6 , snake_case__ : int = 2_5_6 , snake_case__ : float = 0.1 , snake_case__ : bool = False , snake_case__ : Optional[Dict] = None , snake_case__ : Optional[Dict] = None , snake_case__ : float = 0.02 , snake_case__ : float = 1.0 , snake_case__ : float = 1.0 , snake_case__ : float = 1.0 , snake_case__ : float = 20.0 , snake_case__ : Optional[bool] = None , **snake_case__ : Tuple , ):
'''simple docstring'''
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
lowercase :Tuple = SwinConfig(
image_size=3_8_4 , in_channels=3 , patch_size=4 , embed_dim=1_2_8 , depths=[2, 2, 1_8, 2] , num_heads=[4, 8, 1_6, 3_2] , window_size=1_2 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(snake_case__ , snake_case__ ):
lowercase :Optional[Any] = backbone_config.pop('''model_type''' )
lowercase :List[str] = CONFIG_MAPPING[backbone_model_type]
lowercase :Union[str, Any] = config_class.from_dict(snake_case__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
f"""Supported model types: {','.join(self.backbones_supported )}""" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
lowercase :Any = DetrConfig()
else:
# verify that the decoder is supported
lowercase :Tuple = (
decoder_config.pop('''model_type''' ) if isinstance(snake_case__ , snake_case__ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f"""Transformer Decoder {decoder_type} not supported, please use one of"""
f""" {','.join(self.decoders_supported )}""" )
if isinstance(snake_case__ , snake_case__ ):
lowercase :int = CONFIG_MAPPING[decoder_type]
lowercase :int = config_class.from_dict(snake_case__ )
lowercase :Tuple = backbone_config
lowercase :Tuple = decoder_config
# main feature dimension for the model
lowercase :List[Any] = fpn_feature_size
lowercase :List[Any] = mask_feature_size
# initializer
lowercase :Union[str, Any] = init_std
lowercase :Any = init_xavier_std
# Hungarian matcher && loss
lowercase :Any = cross_entropy_weight
lowercase :int = dice_weight
lowercase :Union[str, Any] = mask_weight
lowercase :Tuple = use_auxiliary_loss
lowercase :Union[str, Any] = no_object_weight
lowercase :List[str] = output_auxiliary_logits
lowercase :Dict = self.decoder_config.encoder_attention_heads
lowercase :int = self.decoder_config.num_hidden_layers
super().__init__(**snake_case__ )
@classmethod
def from_backbone_and_decoder_configs( cls , backbone_config: PretrainedConfig , decoder_config: PretrainedConfig , **kwargs ):
    '''simple docstring'''
    return cls(
        backbone_config=backbone_config , decoder_config=decoder_config , **kwargs , )
def to_dict( self ):
    '''simple docstring'''
    output = copy.deepcopy(self.__dict__ )
    output["""backbone_config"""] = self.backbone_config.to_dict()
    output["""decoder_config"""] = self.decoder_config.to_dict()
    output["""model_type"""] = self.__class__.model_type
    return output
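# --- Hedged usage sketch (editor addition). ---
# Uses the upstream transformers classes this file mirrors; the backbone and
# decoder fall back to Swin and DETR defaults when not supplied.
if __name__ == "__main__":
    from transformers import DetrConfig, MaskFormerConfig as UpstreamMaskFormerConfig, SwinConfig as UpstreamSwinConfig

    config = UpstreamMaskFormerConfig.from_backbone_and_decoder_configs(
        backbone_config=UpstreamSwinConfig(), decoder_config=DetrConfig()
    )
    print(config.backbone_config.model_type, config.decoder_config.model_type)  # swin detr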
| 172 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
pipeline_class = KandinskyVaaPipeline
params = [
    "image_embeds",
    "negative_image_embeds",
]
batch_params = ["image_embeds", "negative_image_embeds"]
required_optional_params = [
    "generator",
    "height",
    "width",
    "latents",
    "guidance_scale",
    "num_inference_steps",
    "return_dict",
    "guidance_scale",
    "num_images_per_prompt",
    "output_type",
    "return_dict",
]
test_xformers_attention = False
@property
def text_embedder_hidden_size( self ):
'''simple docstring'''
return 3_2
@property
def time_input_dim( self ):
'''simple docstring'''
return 3_2
@property
def block_out_channels_a( self ):
'''simple docstring'''
return self.time_input_dim
@property
def time_embed_dim( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def cross_attention_dim( self ):
'''simple docstring'''
return 1_0_0
@property
def dummy_unet( self ):
'''simple docstring'''
torch.manual_seed(0 )
config = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
model = UNetaDConditionModel(**config )
return model
@property
def dummy_movq_kwargs( self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def dummy_movq( self ):
'''simple docstring'''
torch.manual_seed(0 )
model = VQModel(**self.dummy_movq_kwargs )
return model
def get_dummy_components( self ):
'''simple docstring'''
unet = self.dummy_unet
movq = self.dummy_movq
scheduler = DDIMScheduler(
    num_train_timesteps=1_0_0_0 , beta_schedule='''linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type='''epsilon''' , thresholding=False , )
components = {
    '''unet''': unet,
    '''scheduler''': scheduler,
    '''movq''': movq,
}
return components
def get_dummy_inputs( self , device , seed=0 ):
    '''simple docstring'''
    image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
    negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
        device )
    if str(device ).startswith('''mps''' ):
        generator = torch.manual_seed(seed )
    else:
        generator = torch.Generator(device=device ).manual_seed(seed )
    inputs = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def __snake_case ( self : List[str] ):
'''simple docstring'''
device = '''cpu'''
components = self.get_dummy_components()
pipe = self.pipeline_class(**components )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
output = pipe(**self.get_dummy_inputs(device ) )
image = output.images
image_from_tuple = pipe(
    **self.get_dummy_inputs(device ) , return_dict=False , )[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
expected_slice = np.array(
    [0.6_23_79_76, 1.0, 0.36_44_13_32, 1.0, 0.70_63_96_34, 0.29_87_71_86, 0.85_65_21_25, 0.5_21_68_43, 0.54_45_40_46] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class KandinskyVaaPipelineIntegrationTests( unittest.TestCase ):
def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self : Any ):
'''simple docstring'''
expected_image = load_numpy(
    '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
    '''/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy''' )
pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
    '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.float16 )
pipe_prior.to(torch_device )
pipeline = KandinskyVaaPipeline.from_pretrained(
    '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.float16 )
pipeline = pipeline.to(torch_device )
pipeline.set_progress_bar_config(disable=None )
prompt = '''red cat, 4k photo'''
generator = torch.Generator(device='''cuda''' ).manual_seed(0 )
image_embeds , negative_image_embeds = pipe_prior(
    prompt , generator=generator , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
generator = torch.Generator(device='''cuda''' ).manual_seed(0 )
output = pipeline(
    image_embeds=image_embeds , negative_image_embeds=negative_image_embeds , generator=generator , num_inference_steps=1_0_0 , output_type='''np''' , )
image = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(image , expected_image )
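# --- Hedged usage sketch (editor addition). ---
# The two-stage prior + decoder flow exercised by the integration test above.
# The local class names follow this corpus's renaming; in current diffusers
# releases they are `KandinskyV22PriorPipeline` / `KandinskyV22Pipeline`.
if __name__ == "__main__":
    pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
    ).to("cuda")
    pipe = KandinskyVaaPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
    ).to("cuda")
    image_embeds, negative_embeds = pipe_prior("red cat, 4k photo", negative_prompt="").to_tuple()
    image = pipe(image_embeds=image_embeds, negative_image_embeds=negative_embeds).images[0]
    image.save("red_cat.png")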
| 172 | 1 |
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset( IterableDataset ):
    def __init__( self , tokenizer , dataset , seq_length=1024 , num_of_sequences=1024 , chars_per_token=3.6 ):
        '''simple docstring'''
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__( self ):
        '''simple docstring'''
        iterator = iter(self.dataset )
        more_examples = True
        while more_examples:
            buffer , buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator )['content'] )
                    buffer_len += len(buffer[-1] )
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer , truncation=False )['input_ids']
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id] )
            for i in range(0 , len(all_token_ids ) , self.seq_length ):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids ) == self.seq_length:
                    yield torch.tensor(input_ids )
def create_dataloader(args ):
    ds_kwargs = {'streaming': True}
    valid_data = load_dataset(args.dataset_name , split='train' , **ds_kwargs )
    valid_dataset = ConstantLengthDataset(tokenizer , valid_data , seq_length=args.seq_length )
    eval_dataloader = DataLoader(valid_dataset , batch_size=args.batch_size )
    return eval_dataloader
def evaluate(args ):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader ):
        with torch.no_grad():
            outputs = model(batch , labels=batch )
        loss = outputs.loss.repeat(args.batch_size )
        losses.append(accelerator.gather(loss ) )
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses ) )
    try:
        perplexity = torch.exp(loss )
    except OverflowError:
        perplexity = float('inf' )
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model , eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
eval_loss , perplexity = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
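# --- Hedged usage sketch (editor addition, commented out because this script
# runs its pipeline at import time). Shows ConstantLengthDataset on a tiny
# in-memory corpus; the "content" field matches what __iter__ reads above.
# toy_tokenizer = AutoTokenizer.from_pretrained("gpt2")
# toy_data = [{"content": "def add(a, b):\n    return a + b"}] * 8
# ds = ConstantLengthDataset(toy_tokenizer, toy_data, seq_length=32)
# print(next(iter(ds)).shape)  # torch.Size([32])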
| 27 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int ) -> bool:
    sq: int = int(number**0.5 )
    return number == sq * sq


def add_three(x_num: int , x_den: int , y_num: int , y_den: int , z_num: int , z_den: int ) -> tuple[int, int]:
    top: int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom: int = x_den * y_den * z_den
    hcf: int = gcd(top , bottom )
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35 ) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0 )
    fraction_sum: tuple[int, int]
    for x_num in range(1 , order + 1 ):
        for x_den in range(x_num + 1 , order + 1 ):
            for y_num in range(1 , order + 1 ):
                for y_den in range(y_num + 1 , order + 1 ):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
    for num, den in unique_s:
        total += Fraction(num , den )
    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
| 27 | 1 |
"""simple docstring"""
import cmath
import math
def apparent_power(voltage: float , current: float , voltage_angle: float , current_angle: float ) -> complex:
    voltage_angle_rad = math.radians(voltage_angle )
    current_angle_rad = math.radians(current_angle )
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage , voltage_angle_rad )
    current_rect = cmath.rect(current , current_angle_rad )
    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
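    # Hedged usage example (editor addition): 100 V at 0 deg, 5 A at -30 deg
    # gives a complex apparent power of 500 VA at -30 degrees.
    s = apparent_power(100, 5, 0, -30)
    print(round(abs(s), 1), round(math.degrees(cmath.phase(s)), 1))  # 500.0 -30.0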
| 366 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward( self , input_ids , token_type_ids , attention_mask ):
        '''simple docstring'''
        return None


class FuncNonContiguousArgs:
    def forward( self , input_ids , some_other_args , token_type_ids , attention_mask ):
        '''simple docstring'''
        return None
class OnnxExportTestCase( unittest.TestCase ):
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(model ,'tf' ,12 ,**model_kwargs )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(model ,'pt' ,12 ,**model_kwargs )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
from transformers import BertModel
vocab_tokens = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
    vocab_file.write('\n'.join(vocab_tokens ) )
    vocab_file.flush()
    tokenizer = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
    model = BertModel(BertConfig(vocab_size=len(vocab_tokens ) ) )
    model.save_pretrained(bert_save_dir )
    self._test_export(bert_save_dir ,'pt' ,12 ,tokenizer )
@require_tf
@slow
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase_ : Optional[int] = self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase )
lowercase_ : int = quantize(Path(__UpperCamelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase_ : Tuple = self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase )
lowercase_ : Tuple = quantize(__UpperCamelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowercase_ : Dict = Path(__UpperCamelCase ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase )
return path
except Exception as e:
self.fail(__UpperCamelCase )
@require_torch
@require_tokenizers
@slow
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
from transformers import BertModel
lowercase_ : List[Any] = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowercase_ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'pt' )
@require_tf
@require_tokenizers
@slow
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
from transformers import TFBertModel
lowercase_ : Optional[Any] = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowercase_ : Any = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'tf' )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
lowercase_ : Tuple = FeatureExtractionPipeline(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : Dict = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Any = infer_shapes(__UpperCamelCase ,__UpperCamelCase )
# Assert all variables are present
self.assertEqual(len(__UpperCamelCase ) ,len(__UpperCamelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] ,__UpperCamelCase )
self.assertSequenceEqual(variable_names[3:] ,__UpperCamelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] ,{0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'] ,{0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'] ,{0: 'batch'} )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Any = ['input_ids', 'attention_mask', 'token_type_ids']
lowercase_ : List[Any] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
lowercase_ , lowercase_ : int = ensure_valid_input(FuncContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__UpperCamelCase ) ,3 )
# Should have exactly the same input names
self.assertEqual(set(__UpperCamelCase ) ,set(__UpperCamelCase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__UpperCamelCase ,(tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowercase_ , lowercase_ : Optional[int] = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(__UpperCamelCase ) ,1 )
self.assertEqual(len(__UpperCamelCase ) ,1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] ,tokens['input_ids'] )
self.assertEqual(ordered_input_names[0] ,'input_ids' )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Dict = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) ,'-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx' ,generated.as_posix() )
| 321 | 0 |
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
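# Each OrderedDict below maps a model type string to the name of the matching Flax
# class; _LazyAutoMapping resolves the actual classes lazily at lookup time.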
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 53 |
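# Count the negative numbers in a grid whose rows and columns are sorted in
# decreasing order (LeetCode 1351), comparing binary search against brute force.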
def generate_large_matrix() -> list[list[int]]:
    """Build a 1000 x 1000 grid sorted in decreasing order along rows and columns."""
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Binary-search a descending row for the index of its first negative value."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1

    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives by binary-searching each row, narrowing the bound as the rows descend."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound

    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by scanning every value in the grid."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Scan each row but stop at the first negative, since every later value in the row is negative too."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Time each implementation against the large grid."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 328 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
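# Map each TrOCR submodule to the public names it defines; the real imports happen
# lazily via `_LazyModule` the first time an attribute is accessed.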
_import_structure = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 365 |
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
__lowerCAmelCase = '''base_with_context'''
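# The T5X checkpoint is a nest of numpy arrays. Each loader below copies them onto
# the matching PyTorch parameters, assuming the T5-style block layout used by these
# diffusers modules, and transposes Flax kernels where needed.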
def load_notes_encoder(weights, model):
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_continuous_encoder(weights, model):
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]

        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_decoder(weights, model):
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))

    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )

    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]

        # layer[0]: FiLM-conditioned self-attention, layer[1]: cross-attention, layer[2]: FiLM feed-forward
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )
        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )

        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))

    return model
def main(args):
    t5x_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5x_checkpoint = jnp.tree_util.tree_map(onp.array, t5x_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"],
        vocab_size=synth_model.model.module.config.vocab_size,
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_context_length=synth_model.sequence_length["targets_context"],
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_length=synth_model.sequence_length["targets_context"],
        max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time,
        d_model=synth_model.model.module.config.emb_dim,
        num_layers=synth_model.model.module.config.num_decoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5x_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5x_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5x_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder,
        continuous_encoder=continuous_encoder,
        decoder=decoder,
        scheduler=scheduler,
        melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument(
'''--checkpoint_path''',
default=F'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help='''Path to the original jax model checkpoint.''',
)
    args = parser.parse_args()
main(args)
| 288 | 0 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
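# Helpers below: an env-flag parser plus skip decorators that disable a test unless
# the matching backend, device, or integration is available.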
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)


def skip(test_case):
    "Decorator that skips a test unconditionally"
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    "Decorator marking a test as slow; skipped unless RUN_SLOW=1 is set"
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    "Decorator marking a test that must run on the CPU only"
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    "Decorator marking a test that requires CUDA"
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    "Decorator marking a test that requires an XPU"
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    "Decorator marking a test that requires the `mps` backend"
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    "Decorator marking a test that requires transformers and datasets"
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    "Decorator marking a test that requires bitsandbytes"
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    "Decorator marking a test that requires a TPU"
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    "Decorator marking a test that requires exactly one GPU"
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    "Decorator marking a test that requires exactly one XPU"
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    "Decorator marking a test that requires several GPUs"
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    "Decorator marking a test that requires several XPUs"
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    "Decorator marking a test that requires safetensors"
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    "Decorator marking a test that requires DeepSpeed"
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_torch_min_version_1_12(test_case):
    "Decorator marking a test that requires torch >= 1.12.0"
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    "Decorator marking a minimum torch version; usable with or without arguments"
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    "Decorator marking a test that requires tensorboard"
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    "Decorator marking a test that requires wandb"
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    "Decorator marking a test that requires comet_ml"
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    "Decorator marking a test that requires at least one tracker (and no comet_ml)"
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    """
    A TestCase class that keeps a single `tempfile.TemporaryDirectory` open during the test class, wipes its contents
    before every test, and removes it once the suite is done.
    """

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        "Creates a `tempfile.TemporaryDirectory` and stores it in `cls.tmpdir`"
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        "Removes `cls.tmpdir` after the test suite has finished"
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        "Destroys all contents in `self.tmpdir`, but not `self.tmpdir` itself"
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    """
    A TestCase class that resets the accelerator state at the end of every test, so state does not leak across tests.
    """

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    """
    A TestCase class that registers mocks to be used in every test, starting them on setup and stopping them on
    cleanup.
    """

    def setUpMocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)


def are_the_same_tensors(tensor):
    "Checks that `tensor` has the same value on every participating process after a `gather`."
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    """
    Runs `command` with `subprocess.check_output` and optionally returns its decoded stdout; raises a
    `SubprocessCallException` that captures the failing output if an error occurs.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
| 187 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
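# These tests exercise both the slow (Python) and fast (Rust) CodeGen tokenizers.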
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # Intentionally overridden as a no-op: pretokenized inputs are not exercised for this tokenizer.
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"

        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)
def snake_case__ ( self : Dict ):
"""simple docstring"""
pass
| 187 | 1 |
"""simple docstring"""
UpperCAmelCase_ : Tuple = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
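# Vigenère cipher: every letter is shifted by the alphabet position of the matching
# key letter, with the key repeated cyclically over the message.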
def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            # Wrap around the alphabet
            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            # Move to the next key letter, cycling back to the start
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            # Non-letters pass through unchanged
            translated.append(symbol)

    return "".join(translated)
if __name__ == "__main__":
main()
| 318 |
"""simple docstring"""
from collections import defaultdict
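# Two strings are anagrams when every character occurs the same number of times in both.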
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings,
    # increment the count for the first and decrement it for the second
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 318 | 1 |
import math
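# Malus's law: transmitted intensity I = I0 * cos^2(theta) for an ideal polarizer at angle theta.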
def malus_law(initial_intensity: float, angle: float) -> float:
    # Handle negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # Handle angle values out of the allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="""malus_law""")
| 82 |
"""simple docstring"""
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
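# `evaluate` interprets a restricted subset of Python against an explicit `state`
# dict and may only call the tools that are passed in alongside the code.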
def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
| 54 | 0 |
'''simple docstring'''
import math
import sys
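# Lagrange's four-square theorem bounds the answer at 4; the DP below finds the exact minimum.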
def perfect_square_min_count(number: int) -> int:
    """Return the least number of perfect squares that sum to `number`, via dynamic programming."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            # Using one square j**2 plus the best decomposition of the remainder
            current_answer = 1 + answers[i - (j**2)]
            answer = min(current_answer, answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
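    # Worked examples (a minimal sketch; values checked by hand):
    #   _lowercase(12) == 3   # 12 = 4 + 4 + 4
    #   _lowercase(25) == 1   # 25 = 5**2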
| 243 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace ( s ,old ,new ,occurrence ):
    '''Replace the last ``occurrence`` matches of ``old`` in ``s`` with ``new``.'''
    parts = s.rsplit(old ,occurrence )
    return new.join(parts )
def count_parameters ( state_dict ):
    '''Sum every parameter value, skipping the encoder.embeddings entries that are not ported.'''
    return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict ( state_dict ):
    '''Rename DALL-E encoder keys to the FLAVA image-codebook naming scheme.'''
    upgrade = {}
    group_keys = ["""group_1""", """group_2""", """group_3""", """group_4"""]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}." ,f"{group_key}.group." )
        if "res_path" in key:
            key = key.replace("""res_path.""" ,"""res_path.path.""" )
        if key.endswith(""".w""" ):
            key = rreplace(key ,""".w""" ,""".weight""" ,1 )
        if key.endswith(""".b""" ):
            key = rreplace(key ,""".b""" ,""".bias""" ,1 )
        upgrade[key] = value.float()
    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint ( checkpoint_path ,pytorch_dump_folder_path ,config_path=None ,save_checkpoint=True ):
    '''Load a DALL-E encoder checkpoint and export it as a FLAVA image codebook.'''
    from dall_e import Encoder
    encoder = Encoder()
    if os.path.exists(checkpoint_path ):
        ckpt = torch.load(checkpoint_path )
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path )
    if isinstance(ckpt ,Encoder ):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt )
    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path )
    else:
        config = FlavaImageCodebookConfig()
    hf_model = FlavaImageCodebook(config ).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict )
    hf_model.load_state_dict(hf_state_dict )
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict )
    state_dict_count = count_parameters(state_dict )
    assert torch.allclose(hf_count ,state_dict_count ,atol=1E-3 )
    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path )
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
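    # Hypothetical invocation sketch (script name and paths are placeholders, not verified):
    #   python convert_dalle_to_flava_codebook.py --checkpoint_path ./dalle_encoder.pkl --pytorch_dump_folder_path ./flava-codebook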
| 243 | 1 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class __A ( FlaxModelTesterMixin , unittest.TestCase ):
    '''Smoke tests for the Flax AutoencoderKL model.'''
    model_class = FlaxAutoencoderKL
    @property
    def dummy_input(self ):
        """A single random batch sized to the smallest supported resolution."""
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )
        return {"sample": image, "prng_key": prng_key}
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict = {
            '''block_out_channels''': [32, 64],
            '''in_channels''': 3,
            '''out_channels''': 3,
            '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            '''latent_channels''': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 211 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class __A ( ProcessorMixin ):
    '''Bundles an MCTCT feature extractor and a tokenizer behind one interface.'''
    feature_extractor_class = 'MCTCTFeatureExtractor'
    tokenizer_class = 'AutoTokenizer'
    def __init__(self , feature_extractor , tokenizer ) -> None:
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__(self , *args , **kwargs ):
        # For backwards compatibility, forward everything to the tokenizer while
        # inside the (deprecated) ``as_target_processor`` context manager.
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
            audio = kwargs.pop('''raw_speech''' )
        else:
            audio = kwargs.pop('''audio''' , None )
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        text = kwargs.pop('''text''' , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs
    def batch_decode(self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def pad(self , *args , **kwargs ):
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs )
        input_features = kwargs.pop('''input_features''' , None )
        labels = kwargs.pop('''labels''' , None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features , *args , **kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels , **kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['''labels'''] = labels['''input_ids''']
            return input_features
    def decode(self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @contextmanager
    def as_target_processor(self ):
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call.''' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
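# Hedged usage sketch (the checkpoint id is an assumption and is not fetched here):
#   processor = __A.from_pretrained("speechbrain/m-ctc-t-large")
#   batch = processor(audio=waveform, sampling_rate=16000, return_tensors="pt")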
| 211 | 1 |
'''simple docstring'''
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray ( rgb ) -> np.ndarray:
    '''Convert an RGB image to grayscale using the ITU-R 601 luma weights.'''
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray_to_binary ( gray ) -> np.ndarray:
    '''Threshold a grayscale image into a boolean foreground mask.'''
    return (gray > 1_27) & (gray <= 2_55)
def dilation ( image , kernel ) -> np.ndarray:
    '''Morphological dilation: a pixel turns on if the kernel overlaps any foreground pixel.'''
    output = np.zeros_like(image )
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
    return output
if __name__ == "__main__":
# read original image
    lena_path = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg'
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert('RGB')
    pil_img.save('result_dilation.png')
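    # A tiny synthetic self-check (a minimal sketch that does not depend on lena.jpg):
    tiny = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
    cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    assert (dilation(tiny, cross) == cross).all()  # one pixel dilates into the cross shape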
| 359 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
_snake_case = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class a__ ( unittest.TestCase ):
    def analyze_directory(self , directory , identifier = None , n_identifier = None , ignore_files = None , only_modules = True , ):
        """Run doctests over every matching file in ``directory``."""
        files = [file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory , file ) )]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier , list ):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append("__init__.py" )
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("Testing" , file )
            if only_modules:
                module_identifier = file.split("." )[0]
                try:
                    module = getattr(transformers , module_identifier )
                    suite = doctest.DocTestSuite(module )
                    result = unittest.TextTestRunner().run(suite )
                    self.assertIs(len(result.failures ) , 0 )
                except AttributeError:
                    logger.info(f'''{module_identifier} is not a module.''' )
            else:
                result = doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed , 0 )
    def test_modeling_doctests(self ):
        """Doctest every ``modeling_*`` file, except the known-problematic CTRL ones."""
        directory = Path("src/transformers" )
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory , identifier=identifier , ignore_files=ignore_files )
    def test_tokenization_doctests(self ):
        directory = Path("src/transformers" )
        identifier = "tokenization"
        self.analyze_directory(directory , identifier=identifier )
    def test_configuration_doctests(self ):
        directory = Path("src/transformers" )
        identifier = "configuration"
        self.analyze_directory(directory , identifier=identifier )
    def test_remaining_files_doctests(self ):
        directory = Path("src/transformers" )
        n_identifier = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory , n_identifier=n_identifier )
    def test_documentation_doctests(self ):
        directory = Path("docs/source" )
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory , ignore_files=ignore_files , only_modules=False )
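# These doc checks are normally driven through pytest, e.g. (the file path is an assumption):
#   python -m pytest -s tests/test_doc_samples.py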
| 199 | 0 |
"""simple docstring"""
def selection_sort ( collection ):
    """Sort ``collection`` in place by repeatedly selecting the minimum of the unsorted tail."""
    length = len(collection )
    for i in range(length - 1 ):
        least = i
        for k in range(i + 1 , length ):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(selection_sort(unsorted))
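    # Quick sanity check (a minimal sketch):
    assert selection_sort([64, 25, 12, 22, 11]) == [11, 12, 22, 25, 64]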
| 100 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
_A : int = """CompVis/stable-diffusion-v1-1"""
_A : Any = """CompVis/stable-diffusion-v1-2"""
_A : Optional[int] = """CompVis/stable-diffusion-v1-3"""
_A : Union[str, Any] = """CompVis/stable-diffusion-v1-4"""
class a__ ( DiffusionPipeline ):
    def __init__( self , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , requires_safety_checker = True , ):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id )
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id )
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id )
        self.pipe4 = StableDiffusionPipeline(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , requires_safety_checker=requires_safety_checker , )
        self.register_modules(pipeline1=self.pipe1 , pipeline2=self.pipe2 , pipeline3=self.pipe3 , pipeline4=self.pipe4 )
    @property
    def layers(self ):
        return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith("_" )}
    def enable_attention_slicing(self , slice_size = "auto" ):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing(self ):
        self.enable_attention_slicing(None )
    @torch.no_grad()
    def text2img_sd1_1(self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
        return self.pipe1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_2(self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
        return self.pipe2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_3(self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
        return self.pipe3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_4(self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
        return self.pipe4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def __call__(self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device )
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]] )
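# Hedged usage sketch (the ``custom_pipeline`` id is an assumption, not verified here):
#   pipe = DiffusionPipeline.from_pretrained(pipe4_model_id, custom_pipeline="stable_diffusion_comparison")
#   images = pipe(prompt="an astronaut riding a horse").images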
| 202 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case__ ( ProcessorMixin ):
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """FlavaImageProcessor"""
    tokenizer_class = ("""BertTokenizer""", """BertTokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = False , max_length = None , stride = 0 , pad_to_multiple_of = None , return_image_mask = None , return_codebook_pixels = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        if images is not None:
            image_features = self.image_processor(
                images , return_image_mask=return_image_mask , return_codebook_pixels=return_codebook_pixels , return_tensors=return_tensors , **kwargs , )
        if text is not None and images is not None:
            encoding.update(image_features )
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
| 268 | """simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy( x ):
    """Entropy of a pre-softmax logit tensor."""
    exp_x = torch.exp(x )
    A = torch.sum(exp_x , dim=1 )  # sum of exp(x_i)
    B = torch.sum(x * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
class DeeBertEncoder( nn.Module ):
def __init__( self , lowerCamelCase ):
super().__init__()
__a = config.output_attentions
__a = config.output_hidden_states
__a = nn.ModuleList([BertLayer(lowerCamelCase ) for _ in range(config.num_hidden_layers )] )
__a = nn.ModuleList([BertHighway(lowerCamelCase ) for _ in range(config.num_hidden_layers )] )
__a = [-1 for _ in range(config.num_hidden_layers )]
def a__ ( self , lowerCamelCase ):
if (type(lowerCamelCase ) is float) or (type(lowerCamelCase ) is int):
for i in range(len(self.early_exit_entropy ) ):
__a = x
else:
__a = x
def a__ ( self , lowerCamelCase ):
__a = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def a__ ( self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ):
__a = ()
__a = ()
__a = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
__a = all_hidden_states + (hidden_states,)
__a = layer_module(
lowerCamelCase , lowerCamelCase , head_mask[i] , lowerCamelCase , lowerCamelCase )
__a = layer_outputs[0]
if self.output_attentions:
__a = all_attentions + (layer_outputs[1],)
__a = (hidden_states,)
if self.output_hidden_states:
__a = current_outputs + (all_hidden_states,)
if self.output_attentions:
__a = current_outputs + (all_attentions,)
__a = self.highway[i](lowerCamelCase )
# logits, pooled_output
if not self.training:
__a = highway_exit[0]
__a = entropy(lowerCamelCase )
__a = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
__a = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
__a = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(lowerCamelCase , i + 1 )
else:
__a = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
__a = all_hidden_states + (hidden_states,)
__a = (hidden_states,)
if self.output_hidden_states:
__a = outputs + (all_hidden_states,)
if self.output_attentions:
__a = outputs + (all_attentions,)
__a = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    """The Bert Model transformer with early exiting (DeeBERT). """ , BERT_START_DOCSTRING , )
class DeeBertModel( BertPreTrainedModel ):
def __init__( self , lowerCamelCase ):
super().__init__(lowerCamelCase )
__a = config
__a = BertEmbeddings(lowerCamelCase )
__a = DeeBertEncoder(lowerCamelCase )
__a = BertPooler(lowerCamelCase )
self.init_weights()
def a__ ( self ):
self.encoder.init_highway_pooler(self.pooler )
def a__ ( self ):
return self.embeddings.word_embeddings
def a__ ( self , lowerCamelCase ):
__a = value
def a__ ( self , lowerCamelCase ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(lowerCamelCase )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
def a__ ( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
__a = input_ids.size()
elif inputs_embeds is not None:
__a = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
__a = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__a = torch.ones(lowerCamelCase , device=lowerCamelCase )
if encoder_attention_mask is None:
__a = torch.ones(lowerCamelCase , device=lowerCamelCase )
if token_type_ids is None:
__a = torch.zeros(lowerCamelCase , dtype=torch.long , device=lowerCamelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__a = self.get_extended_attention_mask(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
__a = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
__a = encoder_attention_mask[:, None, None, :]
__a = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
__a = (1.0 - encoder_extended_attention_mask) * -1_0000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__a = self.get_head_mask(lowerCamelCase , self.config.num_hidden_layers )
__a = self.embeddings(
input_ids=lowerCamelCase , position_ids=lowerCamelCase , token_type_ids=lowerCamelCase , inputs_embeds=lowerCamelCase )
__a = self.encoder(
lowerCamelCase , attention_mask=lowerCamelCase , head_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , )
__a = encoder_outputs[0]
__a = self.pooler(lowerCamelCase )
__a = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException( Exception ):
    def __init__(self , message , exit_layer ):
        self.message = message
        self.exit_layer = exit_layer # start from 1!
class BertHighway( nn.Module ):
def __init__( self , lowerCamelCase ):
super().__init__()
__a = BertPooler(lowerCamelCase )
__a = nn.Dropout(config.hidden_dropout_prob )
__a = nn.Linear(config.hidden_size , config.num_labels )
def a__ ( self , lowerCamelCase ):
# Pooler
__a = encoder_outputs[0]
__a = self.pooler(lowerCamelCase )
# "return" pooler_output
# BertModel
__a = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
__a = bmodel_output[1]
__a = self.dropout(lowerCamelCase )
__a = self.classifier(lowerCamelCase )
return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """ , BERT_START_DOCSTRING , )
class DeeBertForSequenceClassification( BertPreTrainedModel ):
def __init__( self , lowerCamelCase ):
super().__init__(lowerCamelCase )
__a = config.num_labels
__a = config.num_hidden_layers
__a = DeeBertModel(lowerCamelCase )
__a = nn.Dropout(config.hidden_dropout_prob )
__a = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
def a__ ( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=-1 , lowerCamelCase=False , ):
__a = self.num_layers
try:
__a = self.bert(
lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , position_ids=lowerCamelCase , head_mask=lowerCamelCase , inputs_embeds=lowerCamelCase , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
__a = outputs[1]
__a = self.dropout(lowerCamelCase )
__a = self.classifier(lowerCamelCase )
__a = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__a = e.message
__a = e.exit_layer
__a = outputs[0]
if not self.training:
__a = entropy(lowerCamelCase )
__a = []
__a = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__a = MSELoss()
__a = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
__a = CrossEntropyLoss()
__a = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
__a = []
for highway_exit in outputs[-1]:
__a = highway_exit[0]
if not self.training:
highway_logits_all.append(lowerCamelCase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__a = MSELoss()
__a = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
__a = CrossEntropyLoss()
__a = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(lowerCamelCase )
if train_highway:
__a = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__a = (loss,) + outputs
if not self.training:
__a = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__a = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 268 | 1 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester( unittest.TestCase ):
    """Builds a small DetrImageProcessor configuration and helpers for the tests below."""
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_rescale=True , rescale_factor=1 / 255 , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_pad=True , ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=False ):
'''simple docstring'''
if not batched:
_A = image_inputs[0]
if isinstance(__UpperCAmelCase , Image.Image ):
_A , _A = image.size
else:
_A , _A = image.shape[1], image.shape[2]
if w < h:
_A = int(self.size["shortest_edge"] * h / w )
_A = self.size["shortest_edge"]
elif w > h:
_A = self.size["shortest_edge"]
_A = int(self.size["shortest_edge"] * w / h )
else:
_A = self.size["shortest_edge"]
_A = self.size["shortest_edge"]
else:
_A = []
for image in image_inputs:
_A , _A = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_A = max(__UpperCAmelCase , key=lambda __UpperCAmelCase : item[0] )[0]
_A = max(__UpperCAmelCase , key=lambda __UpperCAmelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """Tests for DetrImageProcessor."""
    image_processing_class = DetrImageProcessor if is_vision_available() else None
    def setUp(self ):
        self.image_processor_tester = DetrImageProcessingTester(self )
    @property
    def image_processor_dict(self ):
        return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : str ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase , "image_mean" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "image_std" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "do_rescale" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "rescale_factor" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "do_resize" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "size" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "do_pad" ) )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
_A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , __UpperCAmelCase )
_A = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__UpperCAmelCase )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , __UpperCAmelCase )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
pass
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A , _A = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
_A = image_processing(__UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A = image_processing(__UpperCAmelCase , return_tensors="pt" ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A = image_processing(__UpperCAmelCase , return_tensors="pt" ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
_A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
_A = json.loads(f.read() )
_A = {"image_id": 39769, "annotations": target}
# encode them
_A = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50" )
_A = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , return_tensors="pt" )
# verify pixel values
_A = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , __UpperCAmelCase )
_A = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4 ) )
# verify area
_A = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __UpperCAmelCase ) )
# verify boxes
_A = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __UpperCAmelCase )
_A = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __UpperCAmelCase , atol=1E-3 ) )
# verify image_id
_A = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __UpperCAmelCase ) )
# verify is_crowd
_A = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __UpperCAmelCase ) )
# verify class_labels
_A = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __UpperCAmelCase ) )
# verify orig_size
_A = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __UpperCAmelCase ) )
# verify size
_A = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __UpperCAmelCase ) )
@slow
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
_A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
_A = json.loads(f.read() )
_A = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
_A = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
_A = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic" )
_A = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , masks_path=__UpperCAmelCase , return_tensors="pt" )
# verify pixel values
_A = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , __UpperCAmelCase )
_A = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4 ) )
# verify area
_A = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __UpperCAmelCase ) )
# verify boxes
_A = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __UpperCAmelCase )
_A = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __UpperCAmelCase , atol=1E-3 ) )
# verify image_id
_A = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __UpperCAmelCase ) )
# verify is_crowd
_A = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __UpperCAmelCase ) )
# verify class_labels
_A = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __UpperCAmelCase ) )
# verify masks
_A = 822873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __UpperCAmelCase )
# verify orig_size
_A = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __UpperCAmelCase ) )
# verify size
_A = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __UpperCAmelCase ) )
| 79 |
"""simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels( ):
    """JIT-compile the multi-scale deformable attention CUDA kernels and return the module."""
    from torch.utils.cpp_extension import load
    root = Path(__file__ ).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu" , "ms_deform_attn_cpu.cpp" ),
            os.path.join("cuda" , "ms_deform_attn_cuda.cu" ),
        ]
    ]
    load(
        "MultiScaleDeformableAttention" , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=["-DWITH_CUDA=1"] , extra_cuda_cflags=[
        "-DCUDA_HAS_FP16=1",
        "-D__CUDA_NO_HALF_OPERATORS__",
        "-D__CUDA_NO_HALF_CONVERSIONS__",
        "-D__CUDA_NO_HALF2_OPERATORS__",
        ] , )
    import MultiScaleDeformableAttention as MSDA
    return MSDA
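# Hedged usage sketch (requires a CUDA toolchain; compilation happens on the first call):
#   MSDA = load_cuda_kernels()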
| 247 | 0 |
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester( unittest.TestCase ):
    '''Builds tiny BEiT configurations and random inputs for the tests below.'''
    def __init__( self , parent , vocab_size=100 , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = BeitConfig(
            vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
        return config, pixel_values, labels
def A_ ( self : Optional[int] , _a : str , _a : Union[str, Any] , _a : Any ):
UpperCamelCase__ = FlaxBeitModel(config=_a )
UpperCamelCase__ = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self : int , _a : Optional[int] , _a : Optional[Any] , _a : List[Any] ):
UpperCamelCase__ = FlaxBeitForMaskedImageModeling(config=_a )
UpperCamelCase__ = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def A_ ( self : Union[str, Any] , _a : str , _a : str , _a : Tuple ):
UpperCamelCase__ = self.type_sequence_label_size
UpperCamelCase__ = FlaxBeitForImageClassification(config=_a )
UpperCamelCase__ = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase__ = 1
UpperCamelCase__ = FlaxBeitForImageClassification(_a )
UpperCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ = model(_a )
def A_ ( self : List[Any] ):
UpperCamelCase__ = self.prepare_config_and_inputs()
(
(
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) ,
) = config_and_inputs
UpperCamelCase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class FlaxBeitModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    '''Common model tests for the Flax BEiT variants.'''
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )
    def setUp(self ):
        self.model_tester = FlaxBeitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BeitConfig , has_text_modality=False , hidden_size=37 )
def A_ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def A_ ( self : Optional[int] ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(_a )
UpperCamelCase__ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def A_ ( self : List[str] ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase__ = self._prepare_for_class(_a , _a )
UpperCamelCase__ = model_class(_a )
@jax.jit
def model_jitted(_a : Any , **_a : Any ):
return model(pixel_values=_a , **_a )
with self.subTest('''JIT Enabled''' ):
UpperCamelCase__ = model_jitted(**_a ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCamelCase__ = model_jitted(**_a ).to_tuple()
self.assertEqual(len(_a ) , len(_a ) )
for jitted_output, output in zip(_a , _a ):
self.assertEqual(jitted_output.shape , output.shape )
def A_ ( self : int ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def A_ ( self : Any ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def A_ ( self : Union[str, Any] ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def A_ ( self : List[Any] ):
for model_class_name in self.all_model_classes:
UpperCamelCase__ = model_class_name.from_pretrained('''microsoft/beit-base-patch16-224''' )
UpperCamelCase__ = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(_a )
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCamelCase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@require_flax
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A_ ( self : Optional[int] ):
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def A_ ( self : Dict ):
UpperCamelCase__ = FlaxBeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=_a , return_tensors='''np''' ).pixel_values
# prepare bool_masked_pos
UpperCamelCase__ = np.ones((1, 196) , dtype=_a )
# forward pass
UpperCamelCase__ = model(pixel_values=_a , bool_masked_pos=_a )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 196, 8_192)
self.assertEqual(logits.shape , _a )
UpperCamelCase__ = np.array(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , _a , atol=1E-2 ) )
@slow
def A_ ( self : Optional[int] ):
UpperCamelCase__ = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=_a , return_tensors='''np''' )
# forward pass
UpperCamelCase__ = model(**_a )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 1_000)
self.assertEqual(logits.shape , _a )
UpperCamelCase__ = np.array([-1.2385, -1.0987, -1.0108] )
self.assertTrue(np.allclose(logits[0, :3] , _a , atol=1E-4 ) )
UpperCamelCase__ = 281
self.assertEqual(logits.argmax(-1 ).item() , _a )
@slow
def A_ ( self : Union[str, Any] ):
UpperCamelCase__ = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=_a , return_tensors='''np''' )
# forward pass
UpperCamelCase__ = model(**_a )
UpperCamelCase__ = outputs.logits
# verify the logits
UpperCamelCase__ = (1, 21_841)
self.assertEqual(logits.shape , _a )
UpperCamelCase__ = np.array([1.6881, -0.2787, 0.5901] )
self.assertTrue(np.allclose(logits[0, :3] , _a , atol=1E-4 ) )
UpperCamelCase__ = 2_396
self.assertEqual(logits.argmax(-1 ).item() , _a )
| 35 | from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe( grid: Matrix, row: int, column: int, n: int ) -> bool:
    '''Check that digit ``n`` can go at (row, column) without a row, column, or box clash.'''
    for i in range(9 ):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3 ):
        for j in range(3 ):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def find_empty_location( grid: Matrix ):
    '''Return the first empty cell as (row, column), or None if the grid is full.'''
    for i in range(9 ):
        for j in range(9 ):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku( grid: Matrix ):
    '''Solve the grid in place by backtracking; return it on success, None otherwise.'''
    if location := find_empty_location(grid ):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10 ):
        if is_safe(grid, row, column, digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution( grid: Matrix ) -> None:
    '''Pretty-print the grid row by row.'''
    for row in grid:
        for cell in row:
            print(cell, end=''' ''' )
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 2_0)
print_solution(example_grid)
print("""\nExample grid solution:""")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 35 | 1 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/longformer-base-4096': 4_096,
'allenai/longformer-large-4096': 4_096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Map every byte value 0-255 to a printable unicode character, avoiding BPE control tokens."""
    bs = (
        list(range(ord("""!""" ) ,ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) ,ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) ,ord("""ÿ""" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(c ) for c in cs]
    return dict(zip(bs ,cs ) )
def get_pairs( word ):
    """Return the set of adjacent symbol pairs in ``word`` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
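# Quick sanity check of the two helpers above (a minimal sketch):
#   len(bytes_to_unicode()) == 256 and bytes_to_unicode()[ord("A")] == "A"
#   get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}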
class __A ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , a__ , a__ , a__="replace" , a__="<s>" , a__="</s>" , a__="</s>" , a__="<s>" , a__="<unk>" , a__="<pad>" , a__="<mask>" , a__=False , **a__ , ):
_lowerCAmelCase : Union[str, Any] = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else bos_token
_lowerCAmelCase : Optional[Any] = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else eos_token
_lowerCAmelCase : List[Any] = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else sep_token
_lowerCAmelCase : Optional[int] = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else cls_token
_lowerCAmelCase : List[Any] = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else unk_token
_lowerCAmelCase : Dict = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_lowerCAmelCase : Optional[Any] = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else mask_token
super().__init__(
errors=a__ , bos_token=a__ , eos_token=a__ , unk_token=a__ , sep_token=a__ , cls_token=a__ , pad_token=a__ , mask_token=a__ , add_prefix_space=a__ , **a__ , )
with open(a__ , encoding="""utf-8""" ) as vocab_handle:
_lowerCAmelCase : List[Any] = json.load(a__ )
_lowerCAmelCase : int = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase : Dict = errors # how to handle errors in decoding
_lowerCAmelCase : Any = bytes_to_unicode()
_lowerCAmelCase : Union[str, Any] = {v: k for k, v in self.byte_encoder.items()}
with open(a__ , encoding="""utf-8""" ) as merges_handle:
_lowerCAmelCase : Dict = merges_handle.read().split("""\n""" )[1:-1]
_lowerCAmelCase : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
_lowerCAmelCase : Optional[int] = dict(zip(a__ , range(len(a__ ) ) ) )
_lowerCAmelCase : List[str] = {}
_lowerCAmelCase : str = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_lowerCAmelCase : int = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def __A ( self ):
return len(self.encoder )
def __A ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __A ( self , a__ ):
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : Any = tuple(a__ )
_lowerCAmelCase : Tuple = get_pairs(a__ )
if not pairs:
return token
while True:
_lowerCAmelCase : Optional[Any] = min(a__ , key=lambda a__ : self.bpe_ranks.get(a__ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = bigram
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : List[str] = 0
while i < len(a__ ):
try:
_lowerCAmelCase : str = word.index(a__ , a__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase : Tuple = j
if word[i] == first and i < len(a__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : List[Any] = tuple(a__ )
_lowerCAmelCase : Optional[int] = new_word
if len(a__ ) == 1:
break
else:
_lowerCAmelCase : Any = get_pairs(a__ )
_lowerCAmelCase : str = """ """.join(a__ )
_lowerCAmelCase : str = word
return word
def __A ( self , a__ ):
_lowerCAmelCase : Tuple = []
for token in re.findall(self.pat , a__ ):
_lowerCAmelCase : List[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(a__ ).split(""" """ ) )
return bpe_tokens
def __A ( self , a__ ):
return self.encoder.get(a__ , self.encoder.get(self.unk_token ) )
def __A ( self , a__ ):
return self.decoder.get(a__ )
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = """""".join(a__ )
_lowerCAmelCase : int = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def __A ( self , a__ , a__ = None ):
if not os.path.isdir(a__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
_lowerCAmelCase : Tuple = os.path.join(
a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase : List[str] = os.path.join(
a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(a__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=a__ , ensure_ascii=a__ ) + """\n""" )
_lowerCAmelCase : Optional[int] = 0
with open(a__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
_lowerCAmelCase : List[str] = token_index
writer.write(""" """.join(a__ ) + """\n""" )
index += 1
return vocab_file, merge_file
def __A ( self , a__ , a__ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
_lowerCAmelCase : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __A ( self , a__ , a__ = None , a__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
if token_ids_a is None:
return [1] + ([0] * len(a__ )) + [1]
return [1] + ([0] * len(a__ )) + [1, 1] + ([0] * len(a__ )) + [1]
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : Dict = [self.sep_token_id]
_lowerCAmelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __A ( self , a__ , a__=False , **a__ ):
_lowerCAmelCase : int = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(a__ ) > 0 and not text[0].isspace()):
_lowerCAmelCase : int = """ """ + text
return (text, kwargs)
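# Added usage sketch (hedged): the class above mirrors the byte-level BPE
# tokenizer that ships with Longformer in `transformers`; with that package
# installed, the equivalent public API is exercised like this:
# from transformers import LongformerTokenizer
# tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
# ids = tok("Hello world")["input_ids"]  # wrapped as <s> ... </s>, per build_inputs_with_special_tokens above
# tok.decode(ids)                        # '<s>Hello world</s>'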
| 44 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class A_ ( snake_case__ ):
_lowercase : int = (DPMSolverSinglestepScheduler,)
_lowercase : Optional[Any] = (('num_inference_steps', 2_5),)
def UpperCAmelCase ( self : Dict , **UpperCAmelCase : List[Any] ) -> Optional[Any]:
__lowerCAmelCase: Union[str, Any] = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
'prediction_type': 'epsilon',
'thresholding': False,
'sample_max_value': 1.0,
'algorithm_type': 'dpmsolver++',
'solver_type': 'midpoint',
'lambda_min_clipped': -float('inf' ),
'variance_type': None,
}
config.update(**UpperCAmelCase )
return config
def UpperCAmelCase ( self : str , UpperCAmelCase : List[Any]=0 , **UpperCAmelCase : str ) -> Any:
__lowerCAmelCase: Optional[int] = dict(self.forward_default_kwargs )
__lowerCAmelCase: int = kwargs.pop('num_inference_steps' , UpperCAmelCase )
__lowerCAmelCase: int = self.dummy_sample
__lowerCAmelCase: Union[str, Any] = 0.1 * sample
__lowerCAmelCase: str = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase: Union[str, Any] = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
__lowerCAmelCase: Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
__lowerCAmelCase: Dict = scheduler_class.from_pretrained(UpperCAmelCase )
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals
__lowerCAmelCase: Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowerCAmelCase , __lowerCAmelCase: Optional[int] = sample, sample
for t in range(UpperCAmelCase , time_step + scheduler.config.solver_order + 1 ):
__lowerCAmelCase: str = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
__lowerCAmelCase: str = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self : str ) -> str:
pass
def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : Any=0 , **UpperCAmelCase : Optional[int] ) -> Tuple:
__lowerCAmelCase: Tuple = dict(self.forward_default_kwargs )
__lowerCAmelCase: Tuple = kwargs.pop('num_inference_steps' , UpperCAmelCase )
__lowerCAmelCase: Tuple = self.dummy_sample
__lowerCAmelCase: Union[str, Any] = 0.1 * sample
__lowerCAmelCase: Tuple = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase: Dict = self.get_scheduler_config()
__lowerCAmelCase: Any = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
__lowerCAmelCase: List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase )
__lowerCAmelCase: List[str] = scheduler_class.from_pretrained(UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
__lowerCAmelCase: Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowerCAmelCase: Any = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
__lowerCAmelCase: Dict = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self : int , UpperCAmelCase : Dict=None , **UpperCAmelCase : List[str] ) -> Union[str, Any]:
if scheduler is None:
__lowerCAmelCase: str = self.scheduler_classes[0]
__lowerCAmelCase: int = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Any = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: List[Any] = self.scheduler_classes[0]
__lowerCAmelCase: List[str] = self.get_scheduler_config(**UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: List[Any] = 1_0
__lowerCAmelCase: Dict = self.dummy_model()
__lowerCAmelCase: Dict = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase: Dict = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: List[Any] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
return sample
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
__lowerCAmelCase: List[str] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__lowerCAmelCase: Any = 5_0
__lowerCAmelCase: int = self.dummy_model()
__lowerCAmelCase: List[str] = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
__lowerCAmelCase: List[Any] = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: List[Any] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
__lowerCAmelCase: Optional[int] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2574 ) < 1E-3
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
__lowerCAmelCase: List[str] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__lowerCAmelCase: Dict = self.full_loop(scheduler=UpperCAmelCase )
__lowerCAmelCase: Optional[Any] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
__lowerCAmelCase: Tuple = DEISMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: List[str] = DPMSolverMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: Any = UniPCMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase: Optional[int] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
__lowerCAmelCase: Union[str, Any] = self.full_loop(scheduler=UpperCAmelCase )
__lowerCAmelCase: List[Any] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def UpperCAmelCase ( self : List[str] ) -> List[str]:
self.check_over_configs(thresholding=UpperCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , algorithm_type='dpmsolver++' , solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , )
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> str:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , prediction_type=UpperCAmelCase , algorithm_type=UpperCAmelCase , )
__lowerCAmelCase: Dict = self.full_loop(
solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , prediction_type=UpperCAmelCase , algorithm_type=UpperCAmelCase , )
assert not torch.isnan(UpperCAmelCase ).any(), "Samples have nan numbers"
def UpperCAmelCase ( self : Optional[Any] ) -> str:
self.check_over_configs(lower_order_final=UpperCAmelCase )
self.check_over_configs(lower_order_final=UpperCAmelCase )
def UpperCAmelCase ( self : str ) -> Any:
self.check_over_configs(lambda_min_clipped=-float('inf' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def UpperCAmelCase ( self : List[Any] ) -> str:
self.check_over_configs(variance_type=UpperCAmelCase )
self.check_over_configs(variance_type='learned_range' )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=UpperCAmelCase , time_step=0 )
def UpperCAmelCase ( self : Any ) -> int:
__lowerCAmelCase: Any = self.full_loop()
__lowerCAmelCase: Tuple = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase: List[str] = self.full_loop(use_karras_sigmas=UpperCAmelCase )
__lowerCAmelCase: str = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2248 ) < 1E-3
def UpperCAmelCase ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase: Tuple = self.full_loop(prediction_type='v_prediction' )
__lowerCAmelCase: List[str] = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.1453 ) < 1E-3
def UpperCAmelCase ( self : str ) -> List[str]:
__lowerCAmelCase: int = self.full_loop(prediction_type='v_prediction' , use_karras_sigmas=UpperCAmelCase )
__lowerCAmelCase: Tuple = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_mean.item() - 0.0649 ) < 1E-3
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
__lowerCAmelCase: Any = self.scheduler_classes[0]
__lowerCAmelCase: Optional[Any] = self.get_scheduler_config(thresholding=UpperCAmelCase , dynamic_thresholding_ratio=0 )
__lowerCAmelCase: List[str] = scheduler_class(**UpperCAmelCase )
__lowerCAmelCase: Optional[int] = 1_0
__lowerCAmelCase: Union[str, Any] = self.dummy_model()
__lowerCAmelCase: int = self.dummy_sample_deter.half()
scheduler.set_timesteps(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase: Any = model(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Any = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
assert sample.dtype == torch.floataa
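# Added sketch (hedged): outside the test harness, the scheduler under test is
# driven through the standard diffusers protocol; the random tensor below is a
# stand-in for a real UNet's noise prediction.
# import torch
# from diffusers import DPMSolverSinglestepScheduler
# scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000, solver_order=2)
# scheduler.set_timesteps(25)
# sample = torch.randn(1, 3, 32, 32)
# for t in scheduler.timesteps:
#     model_output = torch.randn_like(sample)  # placeholder for unet(sample, t)
#     sample = scheduler.step(model_output, t, sample).prev_sample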
| 322 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class Node :
    def __init__( self , value):
        '''simple docstring'''
        self.value = value
        self.left = None
        self.right = None
class BinaryTreeNodeSum :
    def __init__( self , tree):
        '''simple docstring'''
        self.tree = tree
    def depth_first_search ( self , node):
        '''simple docstring'''
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )
    def __iter__( self):
        '''simple docstring'''
        yield self.depth_first_search(self.tree)
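# Added example (hedged; uses the fixed class names above): a three-node tree
# summing to 10 + 5 + (-3) = 12.
_root = Node(10)
_root.left = Node(5)
_root.right = Node(-3)
assert list(BinaryTreeNodeSum(_root)) == [12]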
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 353 |
'''simple docstring'''
import argparse
JS_FILE = "docs/source/_static/js/custom.js"
def update_custom_js(version) -> None:
    with open(JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'
    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1
    # We go until the end
    while not lines[index].startswith("}"):
        index += 1
    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'
    with open(JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
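# Added worked example (hedged; the version strings are illustrative): given a
# custom.js containing
#     const stableVersion = "v4.27.0"
#     const versionMapping = {
#         "v4.27.0": "v4.27.0",
#     }
# update_custom_js("4.28.0") rewrites the stable-version line to v4.28.0 and
# appends `"v4.28.0": "v4.28.0",` just before the closing brace of the mapping.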
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
| 190 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : Dict = logging.get_logger(__name__)
def _UpperCAmelCase ( a__ , a__=False):
'''simple docstring'''
a_ : Any = []
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight'''))
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias'''))
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight'''))
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias'''))
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight'''))
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias'''))
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight'''))
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias'''))
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight'''))
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias'''))
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
])
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
a_ : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
])
return rename_keys
def _UpperCAmelCase ( a__ , a__ , a__=False):
'''simple docstring'''
for i in range(config.num_hidden_layers):
if base_model:
a_ : str = """"""
else:
a_ : str = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
a_ : Union[str, Any] = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''')
a_ : Optional[int] = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''')
# next, add query, keys and values (in that order) to the state dict
a_ : Optional[Any] = in_proj_weight[
: config.hidden_size, :
]
a_ : str = in_proj_bias[: config.hidden_size]
a_ : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
a_ : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
a_ : Dict = in_proj_weight[
-config.hidden_size :, :
]
a_ : Optional[int] = in_proj_bias[-config.hidden_size :]
def _UpperCAmelCase ( a__):
'''simple docstring'''
a_ : Optional[Any] = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(a__ , a__)
def _UpperCAmelCase ( a__ , a__ , a__):
'''simple docstring'''
a_ : int = dct.pop(a__)
a_ : Dict = val
def _UpperCAmelCase ( ):
'''simple docstring'''
a_ : Any = """http://images.cocodataset.org/val2017/000000039769.jpg"""
a_ : Tuple = Image.open(requests.get(a__ , stream=a__).raw)
return im
@torch.no_grad()
def _UpperCAmelCase ( a__ , a__ , a__=True):
'''simple docstring'''
a_ : str = ViTConfig()
# patch_size
if model_name[-1] == "8":
a_ : Any = 8
# set labels if required
if not base_model:
a_ : Any = 1_0_0_0
a_ : Any = """huggingface/label-files"""
a_ : List[Any] = """imagenet-1k-id2label.json"""
a_ : Optional[int] = json.load(open(hf_hub_download(a__ , a__ , repo_type="""dataset""") , """r"""))
a_ : Optional[Any] = {int(a__): v for k, v in idalabel.items()}
a_ : int = idalabel
a_ : Dict = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
a_ : Optional[int] = 3_8_4
a_ : List[Any] = 1_5_3_6
a_ : List[Any] = 1_2
a_ : str = 6
# load original model from torch hub
a_ : Optional[Any] = torch.hub.load("""facebookresearch/dino:main""" , a__)
original_model.eval()
# load state_dict of original model, remove and rename some keys
a_ : str = original_model.state_dict()
if base_model:
remove_classification_head_(a__)
a_ : Union[str, Any] = create_rename_keys(a__ , base_model=a__)
for src, dest in rename_keys:
rename_key(a__ , a__ , a__)
read_in_q_k_v(a__ , a__ , a__)
# load HuggingFace model
if base_model:
a_ : Optional[Any] = ViTModel(a__ , add_pooling_layer=a__).eval()
else:
a_ : Any = ViTForImageClassification(a__).eval()
model.load_state_dict(a__)
# Check outputs on an image, prepared by ViTImageProcessor
a_ : Union[str, Any] = ViTImageProcessor()
a_ : List[str] = image_processor(images=prepare_img() , return_tensors="""pt""")
a_ : int = encoding["""pixel_values"""]
a_ : Optional[int] = model(a__)
if base_model:
a_ : Tuple = original_model(a__)
assert torch.allclose(a__ , outputs.last_hidden_state[:, 0, :] , atol=1e-1)
else:
a_ : List[str] = original_model(a__)
assert logits.shape == outputs.logits.shape
assert torch.allclose(a__ , outputs.logits , atol=1e-3)
Path(a__).mkdir(exist_ok=a__)
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''')
model.save_pretrained(a__)
print(f'''Saving image processor to {pytorch_dump_folder_path}''')
image_processor.save_pretrained(a__)
if __name__ == "__main__":
__snake_case : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""dino_vitb16""",
type=str,
help="""Name of the model trained with DINO you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--base_model""",
action="""store_true""",
help="""Whether to only convert the base model (no projection head weights).""",
)
parser.set_defaults(base_model=True)
__snake_case : Union[str, Any] = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
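# Added usage sketch (hedged): once converted, a checkpoint loads through the
# standard ViT classes; "facebook/dino-vitb16" is the published Hub copy of
# this conversion (assumes network access and an installed transformers).
# from transformers import ViTImageProcessor, ViTModel
# processor = ViTImageProcessor.from_pretrained("facebook/dino-vitb16")
# model = ViTModel.from_pretrained("facebook/dino-vitb16")
# inputs = processor(images=prepare_img(), return_tensors="pt")
# outputs = model(**inputs)
# outputs.last_hidden_state.shape  # torch.Size([1, 197, 768]) for the base model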
| 248 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
__snake_case : Dict = logging.get_logger(__name__)
class A__(a_ ):
"""simple docstring"""
_A : Dict = ['''pixel_values''']
def __init__( self , _lowercase = True , _lowercase = 1 / 255 , _lowercase = True , _lowercase = 8 , **_lowercase , ) -> None:
super().__init__(**_lowercase )
a_ : Tuple = do_rescale
a_ : Dict = rescale_factor
a_ : int = do_pad
a_ : Optional[int] = pad_size
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase = None , **_lowercase ) -> np.ndarray:
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase = None ) -> int:
a_ , a_ : str = get_image_size(_lowercase )
a_ : Tuple = (old_height // size + 1) * size - old_height
a_ : List[Any] = (old_width // size + 1) * size - old_width
return pad(_lowercase , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=_lowercase )
def UpperCamelCase__ ( self , _lowercase , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = ChannelDimension.FIRST , **_lowercase , ) -> List[str]:
a_ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
a_ : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
a_ : Tuple = do_pad if do_pad is not None else self.do_pad
a_ : Tuple = pad_size if pad_size is not None else self.pad_size
a_ : Tuple = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
a_ : Tuple = [to_numpy_array(_lowercase ) for image in images]
if do_rescale:
a_ : Dict = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_pad:
a_ : str = [self.pad(_lowercase , size=_lowercase ) for image in images]
a_ : Optional[int] = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
a_ : Optional[Any] = {"""pixel_values""": images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
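# Added sketch: the pad step above grows each axis to the next multiple of
# `size` (8 by default) with symmetric padding; the arithmetic in isolation
# (helper name is ours, mirroring (old // size + 1) * size - old above):
def _pad_amounts(height, width, size=8):
    return (height // size + 1) * size - height, (width // size + 1) * size - width
assert _pad_amounts(18, 18) == (6, 6)  # 18 -> 24 on both axes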
| 248 | 1 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
A_ : List[Any] = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = ['GPTNeoXTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[int] = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
A_ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
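# Added note (hedged): with the _LazyModule pattern above, nothing heavy is
# imported until an attribute is touched, yet both forms below behave the same:
# from transformers import GPTNeoXForCausalLM            # resolved lazily at package level
# from transformers.models.gpt_neox import GPTNeoXModel  # triggers the real import on first access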
| 141 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _a (__magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Any = KandinskyVaaImgaImgPipeline
UpperCAmelCase__: Optional[Any] = ['''image_embeds''', '''negative_image_embeds''', '''image''']
UpperCAmelCase__: str = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
UpperCAmelCase__: int = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
UpperCAmelCase__: Union[str, Any] = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 100
@property
def __A ( self ):
torch.manual_seed(0 )
A__ : Dict = {
"""in_channels""": 4,
            # Out channels is double the in channels because the model predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A__ : List[str] = UNetaDConditionModel(**A__ )
return model
@property
def __A ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __A ( self ):
torch.manual_seed(0 )
A__ : Tuple = VQModel(**self.dummy_movq_kwargs )
return model
def __A ( self ):
A__ : Optional[int] = self.dummy_unet
A__ : Dict = self.dummy_movq
A__ : List[Any] = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0_0_8_5,
"""beta_end""": 0.0_1_2,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
A__ : List[str] = DDIMScheduler(**A__ )
A__ : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def __A ( self , A__ , A__=0 ):
A__ : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A__ ) ).to(A__ )
A__ : Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
A__ )
# create init_image
A__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(A__ ) ).to(A__ )
A__ : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A__ : Dict = Image.fromarray(np.uinta(A__ ) ).convert("""RGB""" ).resize((256, 256) )
if str(A__ ).startswith("""mps""" ):
A__ : Any = torch.manual_seed(A__ )
else:
A__ : List[Any] = torch.Generator(device=A__ ).manual_seed(A__ )
A__ : Optional[int] = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def __A ( self ):
A__ : str = """cpu"""
A__ : Any = self.get_dummy_components()
A__ : Union[str, Any] = self.pipeline_class(**A__ )
A__ : List[str] = pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
A__ : Dict = pipe(**self.get_dummy_inputs(A__ ) )
A__ : Any = output.images
A__ : List[str] = pipe(
**self.get_dummy_inputs(A__ ) , return_dict=A__ , )[0]
A__ : Optional[int] = image[0, -3:, -3:, -1]
A__ : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A__ : str = np.array(
[0.6_1_9_9_7_7_8, 0.6_3_9_8_4_4_0_6, 0.4_6_1_4_5_7_8_5, 0.6_2_9_4_4_9_8_4, 0.5_6_2_2_2_1_5, 0.4_7_3_0_6_1_3_2, 0.4_7_4_4_1_4_5_6, 0.4_6_0_7_6_0_6, 0.4_8_7_1_9_2_6_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
A__ : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
A__ : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
A__ : str = """A red cartoon frog, 4k"""
A__ : int = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(A__ )
A__ : List[Any] = KandinskyVaaImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
A__ : List[str] = pipeline.to(A__ )
pipeline.set_progress_bar_config(disable=A__ )
A__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
A__ , A__ : Optional[Any] = pipe_prior(
A__ , generator=A__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
A__ : str = pipeline(
image=A__ , image_embeds=A__ , negative_image_embeds=A__ , generator=A__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
A__ : Optional[int] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(A__ , A__ )
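# Added usage sketch (hedged): the slow test above matches the public two-stage
# API; in current diffusers the classes are named KandinskyV22PriorPipeline and
# KandinskyV22Img2ImgPipeline, and `init_image` below stands for any PIL image.
# prior = KandinskyV22PriorPipeline.from_pretrained(
#     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16).to("cuda")
# decoder = KandinskyV22Img2ImgPipeline.from_pretrained(
#     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16).to("cuda")
# image_embeds, negative_embeds = prior("A red cartoon frog, 4k").to_tuple()
# frog = decoder(image=init_image, image_embeds=image_embeds,
#                negative_image_embeds=negative_embeds, strength=0.2).images[0]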
| 141 | 1 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCamelCase : Optional[Any] = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 182 | import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq, size):
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def prepare_input(dirty):
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean
def generate_table(key):
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table
def encode(plaintext, key):
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(plaintext, 2):
        rowa, cola = divmod(table.index(chara), 5)
        rowb, colb = divmod(table.index(charb), 5)
        if rowa == rowb:
            ciphertext += table[rowa * 5 + (cola + 1) % 5]
            ciphertext += table[rowb * 5 + (colb + 1) % 5]
        elif cola == colb:
            ciphertext += table[((rowa + 1) % 5) * 5 + cola]
            ciphertext += table[((rowb + 1) % 5) * 5 + colb]
        else:  # rectangle
            ciphertext += table[rowa * 5 + colb]
            ciphertext += table[rowb * 5 + cola]
    return ciphertext
def decode(ciphertext, key):
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(ciphertext, 2):
        rowa, cola = divmod(table.index(chara), 5)
        rowb, colb = divmod(table.index(charb), 5)
        if rowa == rowb:
            plaintext += table[rowa * 5 + (cola - 1) % 5]
            plaintext += table[rowb * 5 + (colb - 1) % 5]
        elif cola == colb:
            plaintext += table[((rowa - 1) % 5) * 5 + cola]
            plaintext += table[((rowb - 1) % 5) * 5 + colb]
        else:  # rectangle
            plaintext += table[rowa * 5 + colb]
            plaintext += table[rowb * 5 + cola]
    return plaintext
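# Added round-trip check (hedged; relies on the fixed function names above).
# prepare_input pads the repeated L and the odd length with X:
assert prepare_input("Hello World") == "HELXLOWORLDX"
assert decode(encode("Hello World", "secret"), "secret") == "HELXLOWORLDX"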
| 182 | 1 |
def pancake_sort(arr) -> list:
    '''simple docstring'''
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
__snake_case = input('''Enter numbers separated by a comma:\n''').strip()
__snake_case = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
| 367 | import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __snake_case ( lowerCamelCase__ ):
@require_torch
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : List[Any] ='''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
UpperCAmelCase : Tuple ='''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
UpperCAmelCase : int ='''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
UpperCAmelCase : Optional[int] ='''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(snake_case__ )
BertModel.from_pretrained(snake_case__ )
BertTokenizer.from_pretrained(snake_case__ )
pipeline(task='''fill-mask''' , model=snake_case__ )
# baseline - just load from_pretrained with normal network
UpperCAmelCase : List[Any] =[sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
UpperCAmelCase : List[Any] =self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCAmelCase : Optional[Any] ='''1'''
UpperCAmelCase : List[Any] =subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : Optional[Any] ='''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
UpperCAmelCase : Any ='''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
UpperCAmelCase : Union[str, Any] ='''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
UpperCAmelCase : Union[str, Any] ='''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(snake_case__ )
BertModel.from_pretrained(snake_case__ )
BertTokenizer.from_pretrained(snake_case__ )
pipeline(task='''fill-mask''' , model=snake_case__ )
# baseline - just load from_pretrained with normal network
UpperCAmelCase : Any =[sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
UpperCAmelCase : List[str] =self.get_env()
UpperCAmelCase : Any =subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] ='''
from transformers import BertConfig, BertModel, BertTokenizer
'''
UpperCAmelCase : int ='''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
UpperCAmelCase : int ='''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
UpperCAmelCase : Dict =[sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
UpperCAmelCase : Any =self.get_env()
UpperCAmelCase : List[Any] =subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# next emulate no network
UpperCAmelCase : Optional[Any] =[sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCAmelCase : int ='''1'''
UpperCAmelCase : Optional[Any] =subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : Dict ='''
from transformers import pipeline
'''
UpperCAmelCase : List[Any] ='''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
UpperCAmelCase : Tuple ='''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
UpperCAmelCase : Optional[int] =self.get_env()
UpperCAmelCase : int ='''1'''
UpperCAmelCase : Optional[int] =[sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
UpperCAmelCase : List[str] =subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Any ='''
from transformers import AutoModel
'''
UpperCAmelCase : Optional[Any] ='''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
UpperCAmelCase : Dict =[sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
UpperCAmelCase : Optional[int] =self.get_env()
UpperCAmelCase : Optional[Any] =subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCAmelCase : Any ='''1'''
UpperCAmelCase : Dict =subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
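# Added sketch (hedged): the core pattern these tests exercise, outside the
# test harness — TRANSFORMERS_OFFLINE=1 makes `transformers` resolve models
# from the local cache only:
# import os, subprocess, sys
# env = dict(os.environ, TRANSFORMERS_OFFLINE="1")
# code = "from transformers import BertConfig; BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert'); print('success')"
# result = subprocess.run([sys.executable, "-c", code], env=env, capture_output=True)
# # succeeds only if the files were cached beforehand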
| 78 | 0 |
'''simple docstring'''
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    '''simple docstring'''
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
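# Added worked examples for the function above (name per the fix):
assert allocation_num(16, 4) == ["1-4", "5-8", "9-12", "13-16"]
assert allocation_num(50, 3) == ["1-16", "17-32", "33-50"]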
if __name__ == "__main__":
import doctest
doctest.testmod()
| 229 |
'''simple docstring'''
def binary_and(a: int, b: int) -> str:
    '''simple docstring'''
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
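# Added worked examples for the fixed function above:
assert binary_and(25, 32) == "0b000000"  # 0b011001 & 0b100000
assert binary_and(37, 50) == "0b100000"  # 0b100101 & 0b110010 -> 32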
if __name__ == "__main__":
import doctest
doctest.testmod()
| 229 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class snake_case_ ( unittest.TestCase ):
def __init__( self : Dict , _snake_case : Tuple , _snake_case : str=7 , _snake_case : Optional[int]=3 , _snake_case : str=18 , _snake_case : Optional[Any]=30 , _snake_case : Any=400 , _snake_case : Dict=True , _snake_case : Optional[int]=None , _snake_case : Optional[int]=True , _snake_case : int=None , )->Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase : Tuple = size if size is not None else {"""shortest_edge""": 20}
__lowerCAmelCase : List[str] = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
__lowerCAmelCase : Optional[Any] = parent
__lowerCAmelCase : Tuple = batch_size
__lowerCAmelCase : Optional[Any] = num_channels
__lowerCAmelCase : List[str] = image_size
__lowerCAmelCase : Optional[Any] = min_resolution
__lowerCAmelCase : Union[str, Any] = max_resolution
__lowerCAmelCase : Dict = do_resize
__lowerCAmelCase : Optional[int] = size
__lowerCAmelCase : Optional[Any] = do_center_crop
__lowerCAmelCase : Optional[int] = crop_size
def UpperCAmelCase__ ( self : Any )->str:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class snake_case_ ( __lowercase ,unittest.TestCase ):
A_ = MobileNetVaImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : int )->List[str]:
'''simple docstring'''
__lowerCAmelCase : Tuple = MobileNetVaImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self : Tuple )->int:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : Dict )->Any:
'''simple docstring'''
__lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowercase_ , """size""" ) )
self.assertTrue(hasattr(lowercase_ , """do_center_crop""" ) )
self.assertTrue(hasattr(lowercase_ , """crop_size""" ) )
def UpperCAmelCase__ ( self : Tuple )->Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
__lowerCAmelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def UpperCAmelCase__ ( self : Union[str, Any] )->int:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Dict )->Tuple:
'''simple docstring'''
__lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
__lowerCAmelCase : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowerCAmelCase : List[Any] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase__ ( self : Any )->Tuple:
'''simple docstring'''
__lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
__lowerCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowerCAmelCase : List[Any] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase__ ( self : List[Any] )->List[Any]:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
__lowerCAmelCase : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowerCAmelCase : int = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
            ) , )
| 369 |
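# Added usage sketch (hedged) for the MobileNetV1 image-processor test above;
# outside the harness the processor is the usual resize + center-crop front end:
# import numpy as np
# from transformers import MobileNetV1ImageProcessor
# processor = MobileNetV1ImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
# image = np.random.randint(0, 256, (30, 40, 3), dtype=np.uint8)
# processor(image, return_tensors="pt").pixel_values.shape  # torch.Size([1, 3, 18, 18])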
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_UpperCAmelCase = 'bart'
_UpperCAmelCase = True
@st.cache(allow_output_mutation=SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
if LOAD_DENSE_INDEX:
__lowerCAmelCase : Dict = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
__lowerCAmelCase : str = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
__lowerCAmelCase : Tuple = qar_model.eval()
else:
__lowerCAmelCase , __lowerCAmelCase : str = (None, None)
if MODEL_TYPE == "bart":
__lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
__lowerCAmelCase : str = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
__lowerCAmelCase : str = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
__lowerCAmelCase : List[str] = sas_model.eval()
else:
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True )
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
        wiki40b_passage_reps = np.memmap(
            """wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wiki40b_passages.num_rows, 128) , )
        wiki40b_index_flat = faiss.IndexFlatIP(128 )
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res , 1 , wiki40b_index_flat )
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps ) # TODO fix for larger GPU
    else:
        wiki40b_passages , wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True )
def load_train_data():
    eli5 = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
    eli5_train = eli5["""train_eli5"""]
    eli5_train_q_reps = np.memmap(
        """eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(eli5_train.num_rows, 128) )
    eli5_train_q_index = faiss.IndexFlatIP(128 )
    eli5_train_q_index.add(eli5_train_q_reps )
    return (eli5_train, eli5_train_q_index)
wiki40b_passages , wiki40b_gpu_index , es_client = load_indexes()
qar_tokenizer , qar_model , sas_tokenizer , sas_model = load_models()
eli5_train , eli5_train_q_index = load_train_data()
def find_nearest_training(question , n_results=10 ):
    q_rep = embed_questions_for_retrieval([question] , qar_tokenizer , qar_model )
    D, I = eli5_train_q_index.search(q_rep , n_results )
    nn_examples = [eli5_train[int(i )] for i in I[0]]
    return nn_examples
def make_support(question , source="wiki40b" , method="dense" , n_results=10 ):
    if source == "none":
        support_doc , hit_lst = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
    else:
        if method == "dense":
            support_doc , hit_lst = query_qa_dense_index(
                question , qar_model , qar_tokenizer , wiki40b_passages , wiki40b_gpu_index , n_results )
        else:
            support_doc , hit_lst = query_es_index(
                question , es_client , index_name="""english_wiki40b_snippets_100w""" , n_results=n_results , )
    support_list = [
        (res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
    ]
    question_doc = """question: {} context: {}""".format(question , support_doc )
    return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda SCREAMING_SNAKE_CASE : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda SCREAMING_SNAKE_CASE : None),
} )
def answer_question(question_doc , sas_model , sas_tokenizer , min_len=64 , max_len=256 , sampling=False , n_beams=2 , top_p=0.95 , temp=0.8 ):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc , sas_model , sas_tokenizer , num_answers=1 , num_beams=n_beams , min_len=min_len , max_len=max_len , do_sample=sampling , temp=temp , top_p=top_p , top_k=None , max_input_length=1_024 , device="""cuda:0""" , )[0]
    return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
header_html = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
header_full = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
demo_options = st.sidebar.checkbox('Demo options')
if demo_options:
    action_st = st.sidebar.selectbox(
'',
action_list,
index=3,
)
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
    show_passages = show_type == 'Show full text of passages'
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
    retriever_info = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
    index_type = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
    wiki_source = 'wiki40b'
    index_type = 'dense'
sampled = 'beam'
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox('Generation options')
if generate_options:
    generate_info = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
    min_len = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
    max_len = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
        n_beams = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
        top_p = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
        temp = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
        n_beams = None
# start main text
questions_list = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
question_s = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input('Enter your question here:', '')
else:
    question = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
            _ , support_list_dense = make_support(question, source=wiki_source, method='dense', n_results=10)
            _ , support_list_sparse = make_support(question, source=wiki_source, method='sparse', n_results=10)
            support_list = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
            question_doc , support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer , support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
            wiki_url = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
            sec_titles = res[1].strip()
if sec_titles == "":
                sections = '[{}]({})'.format(res[0], wiki_url)
else:
                sec_list = sec_titles.split(' & ')
                sections = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
        answers_st = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
disclaimer = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
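# A minimal sketch (illustrative, with random data) of the dense-retrieval
# step the app performs: passage embeddings live in a faiss.IndexFlatIP, so
# `search` returns the passages with the largest inner product against the
# question embedding.
#
#     import faiss
#     import numpy as np
#     reps = np.random.rand(1_000, 128).astype("float32")  # fake passage embeddings
#     index = faiss.IndexFlatIP(128)                       # max-inner-product index
#     index.add(reps)
#     q = np.random.rand(1, 128).astype("float32")         # fake question embedding
#     D, I = index.search(q, 10)                           # top-10 scores and passage ids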
"""simple docstring"""
import functools
from typing import Any
def word_break( string: str , words: list[str] ) -> bool:
    """simple docstring"""
    if not isinstance(string , str ) or len(string ) == 0:
        raise ValueError("the string should be not empty string" )
    if not isinstance(words , list ) or not all(
        isinstance(item , str ) and len(item ) > 0 for item in words ):
        raise ValueError("the words should be a list of non-empty strings" )
    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string )
    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int ) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index , len_string ):
            trie_node = trie_node.get(string[i] , None )
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key , False ) and is_breakable(i + 1 ):
                return True
        return False
    return is_breakable(0 )
if __name__ == "__main__":
import doctest
    doctest.testmod()
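# Usage sketch (inputs are illustrative assumptions, not from the source):
#
#     word_break("applepenapple" , ["apple", "pen"] )                    # True
#     word_break("catsandog" , ["cats", "dog", "sand", "and", "cat"] )   # False
#
# The trie bounds each inner scan by the longest dictionary word, and
# functools.cache memoizes is_breakable per start index, so the whole check
# runs in roughly O(len(string) * longest_word) after the trie build.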
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class UpperCAmelCase_ ( PretrainedConfig ):
    model_type = '''megatron-bert'''
    def __init__( self , vocab_size=29_056 , hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4_096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
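# A small usage sketch (the class name is the snippet's obfuscated one; the
# keyword values below are illustrative assumptions):
#
#     config = UpperCAmelCase_(hidden_size=512 , num_hidden_layers=8 )
#     config.model_type      # "megatron-bert"
#     config.hidden_size     # 512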
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'openbmb/cpm-ant-10b': 1024,
}
def load_vocab( vocab_file ):
    '''simple docstring'''
    vocab = collections.OrderedDict()
    with open(vocab_file , 'r' , encoding='utf-8' ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip('\n' )
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object ):
    '''simple docstring'''
    def __init__( self , vocab , unk_token="<unk>" , max_input_chars_per_word=2_0_0 ):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize( self , token ):
        chars = list(token )
        if len(chars ) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars ):
            end = len(chars )
            cur_substr = None
            while start < end:
                substr = ''.join(chars[start:end] )
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                sub_tokens.append(cur_substr )
                start = end
        return sub_tokens
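# A quick trace of the greedy longest-match-first loop above (the vocabulary
# and input are illustrative assumptions):
#
#     wp = WordpieceTokenizer(vocab={"un", "happy"} , unk_token="<unk>" )
#     wp.tokenize("unhappy")   # ["un", "happy"]  -- longest prefix wins, then restart
#     wp.tokenize("xyz")       # ["<unk>", "<unk>", "<unk>"]  -- one <unk> per unmatched char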
class CpmAntTokenizer(PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    add_prefix_space = False
    def __init__( self , vocab_file , bod_token="<d>" , eod_token="</d>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , line_token="</n>" , space_token="</_>" , padding_side="left" , **kwargs , ):
        requires_backends(self , ['jieba'] )
        super().__init__(
            bod_token=bod_token , eod_token=eod_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , unk_token=unk_token , line_token=line_token , space_token=space_token , padding_side=padding_side , **kwargs , )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file )
        self.encoder[' '] = self.encoder[space_token]
        self.encoder['\n'] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
    @property
    def bod_token_id( self ):
        return self.encoder[self.bod_token]
    @property
    def eod_token_id( self ):
        return self.encoder[self.eod_token]
    @property
    def newline_id( self ):
        return self.encoder["\n"]
    @property
    def vocab_size( self ) -> int:
        return len(self.encoder )
    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def _tokenize( self , text ):
        output_tokens = []
        for x in jieba.cut(text , cut_all=False ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x ) )
        return output_tokens
    def _decode( self , token_ids , **kwargs ):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids , **kwargs )
    def check( self , token ):
        return token in self.encoder
    def convert_tokens_to_string( self , tokens ):
        return "".join(tokens )
    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.decoder.get(index , self.unk_token )
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        else:
            vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder['</_>'] = self.encoder[' ']
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder['</n>'] = self.encoder['\n']
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        with open(vocab_file , 'w' , encoding='utf-8' ) as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        ' Please check that the vocabulary is not corrupted!' )
                    index = token_index
                writer.write(token + '\n' )
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 ))
        return [1] + ([0] * len(token_ids_0 ))
from __future__ import annotations
def max_sum_in_array( array: list[int] , k: int ) -> int:
    '''simple docstring'''
    if len(array ) < k or k < 0:
        raise ValueError('Invalid Input' )
    max_sum = current_sum = sum(array[:k] )
    for i in range(len(array ) - k ):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum , current_sum )
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
print(F'The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}')
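    # Deterministic sanity check (values are illustrative, not from the source):
    # windows of size 4 over [1, 4, 2, 10, 23, 3, 1, 0, 20] sum to
    # 17, 39, 38, 37, 27, 24, so the maximum is 39.
    assert max_sum_in_array([1, 4, 2, 10, 23, 3, 1, 0, 20], 4) == 39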
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class a ( BenchmarkArguments ):
    deprecated_args = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
    def __init__( self , **kwargs ):
'''simple docstring'''
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg )
logger.warning(
f'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or'''
f''' {positive_arg}={kwargs[positive_arg]}''' )
        self.tpu_name = kwargs.pop("""tpu_name""" , self.tpu_name )
        self.device_idx = kwargs.pop("""device_idx""" , self.device_idx )
        self.eager_mode = kwargs.pop("""eager_mode""" , self.eager_mode )
        self.use_xla = kwargs.pop("""use_xla""" , self.use_xla )
        super().__init__(**kwargs )
    tpu_name: str = field(
        default=None , metadata={'help': 'Name of TPU'} , )
    device_idx: int = field(
        default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , )
    eager_mode: bool = field(default=False , metadata={'help': 'Benchmark models in eager model.'} )
    use_xla: bool = field(
        default=False , metadata={
            'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'
        } , )
@cached_property
    def _setup_tpu( self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
return tpu
@cached_property
    def _setup_strategy( self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
            strategy = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" )
                strategy = tf.distribute.OneDeviceStrategy(device=f'''/gpu:{self.device_idx}''' )
else:
tf.config.set_visible_devices([] , """GPU""" ) # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f'''/cpu:{self.device_idx}''' )
return strategy
@property
    def is_tpu( self ) -> bool:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return self._setup_tpu is not None
@property
    def strategy( self ) -> "tf.distribute.Strategy":
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return self._setup_strategy
@property
    def gpu_list( self ):
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return tf.config.list_physical_devices("""GPU""" )
@property
    def n_gpu( self ) -> int:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
    def is_gpu( self ) -> bool:
'''simple docstring'''
return self.n_gpu > 0
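# A hedged usage sketch (keyword values are assumptions; `models`,
# `batch_sizes` and `sequence_lengths` come from the BenchmarkArguments base
# class, and `a` is the snippet's obfuscated class name):
#
#     args = a(models=["bert-base-uncased"] , batch_sizes=[8] , sequence_lengths=[128] , eager_mode=True )
#     args.strategy   # OneDeviceStrategy on CPU/GPU, or TPUStrategy when a TPU resolves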
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"""([A-Z]+)([A-Z][a-z])""")
_lowercase_uppercase_re = re.compile(r"""([a-z\d])([A-Z])""")
_single_underscore_re = re.compile(r"""(?<!_)_(?!_)""")
_multiple_underscores_re = re.compile(r"""(_{2,})""")
_split_re = r"""^\w+(\.\w+)*$"""
_INVALID_WINDOWS_CHARACTERS_IN_PATH = r"""<>:/\|?*"""
def camelcase_to_snakecase( name ):
    name = _uppercase_uppercase_re.sub(R"""\1_\2""" , name )
    name = _lowercase_uppercase_re.sub(R"""\1_\2""" , name )
    return name.lower()
def snakecase_to_camelcase( name ):
    name = _single_underscore_re.split(name )
    name = [_multiple_underscores_re.split(n ) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name ) if n != "" )
def filename_prefix_for_name( name ):
    if os.path.basename(name ) != name:
        raise ValueError(f'''Should be a dataset name, not a path: {name}''' )
    return camelcase_to_snakecase(name )
def filename_prefix_for_split( name , split ):
    if os.path.basename(name ) != name:
        raise ValueError(f'''Should be a dataset name, not a path: {name}''' )
    if not re.match(_split_re , split ):
        raise ValueError(f'''Split name should match \'{_split_re}\' but got \'{split}\'.''' )
    return f'''{filename_prefix_for_name(name )}-{split}'''
def filepattern_for_dataset_split( dataset_name , split , data_dir , filetype_suffix=None ):
    prefix = filename_prefix_for_split(dataset_name , split )
    if filetype_suffix:
        prefix += f'''.{filetype_suffix}'''
    filepath = os.path.join(data_dir , prefix )
    return f'''{filepath}*'''
def filenames_for_dataset_split( path , dataset_name , split , filetype_suffix=None , shard_lengths=None ):
    prefix = filename_prefix_for_split(dataset_name , split )
    prefix = os.path.join(path , prefix )
    if shard_lengths:
        num_shards = len(shard_lengths )
        filenames = [f'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(num_shards )]
        if filetype_suffix:
            filenames = [filename + f'''.{filetype_suffix}''' for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f'''.{filetype_suffix}'''
        return [filename]
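# Illustrative expectations (inputs are assumptions, not from the source):
#
#     camelcase_to_snakecase("HelloWorld")               # "hello_world"
#     filename_prefix_for_split("HelloWorld" , "train")  # "hello_world-train"
#     filenames_for_dataset_split("/tmp" , "HelloWorld" , "train" , "arrow" , [10, 10])
#     # ["/tmp/hello_world-train-00000-of-00002.arrow",
#     #  "/tmp/hello_world-train-00001-of-00002.arrow"]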
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__( self , value: int ):
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None
class BinaryTreeNodeSum:
    def __init__( self , tree: Node ):
        self.tree = tree
    def depth_first_search( self , node: Node | None ) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left ) + self.depth_first_search(node.right )
        )
    def __iter__( self ) -> Iterator[int]:
        yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
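    # Usage sketch (the tree shape is an illustrative assumption):
    #
    #     root = Node(10 )
    #     root.left = Node(5 )
    #     root.right = Node(-3 )
    #     tuple(BinaryTreeNodeSum(root ))   # (12,)  -- 10 + 5 + (-3)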
'''simple docstring'''
import math
from collections.abc import Callable
def intersection( function: Callable[[float], float] , x0: float , x1: float ) -> float:
    '''simple docstring'''
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1 ) == function(x_n ):
            raise ZeroDivisionError('''float division by zero, could not find root''' )
        x_n2 = x_n1 - (
            function(x_n1 ) / ((function(x_n1 ) - function(x_n )) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1 ) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f( x: float ) -> float:
    '''simple docstring'''
    return math.pow(x , 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
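    # The loop above is the secant method:
    #     x_{n+1} = x_n - f(x_n) * (x_n - x_{n-1}) / (f(x_n) - f(x_{n-1}))
    # For f(x) = x**3 - 2*x - 5 with starting points 3 and 3.5 it converges
    # to the real root near 2.0945515, which is what gets printed.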
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
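# A hedged launch sketch (commands follow the readme linked above; the
# filename tracking.py is an assumption about where this script is saved):
#
#     accelerate config                              # answer the hardware prompts once
#     accelerate launch tracking.py --with_tracking  # distributed run with tracker logging
#     python tracking.py --cpu                       # plain single-process CPU run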
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator , batch_size = 1_6 ):
"""simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased" )
    datasets = load_dataset("glue" , "mrpc" )
    def tokenize_function(examples ):
# max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )
    def collate_fn(examples ):
# On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 1_6
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="longest" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="pt" , )
# Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function( config , args ):
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , a_ ) == "1":
__A = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
__A = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
__A = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__A = config["lr"]
__A = int(config["num_epochs"] )
__A = int(config["seed"] )
__A = int(config["batch_size"] )
set_seed(a_ )
__A , __A = get_dataloaders(a_ , a_ )
__A = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
__A = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__A = batch_size // MAX_GPU_BATCH_SIZE
__A = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__A = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=a_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__A = model.to(accelerator.device )
# Instantiate optimizer
__A = AdamW(params=model.parameters() , lr=a_ )
# Instantiate scheduler
__A = get_linear_schedule_with_warmup(
optimizer=a_ , num_warmup_steps=1_0_0 , num_training_steps=(len(a_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__A , __A , __A , __A , __A = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
__A = os.path.split(a_ )[-1].split("." )[0]
accelerator.init_trackers(a_ , a_ )
# Now we train the model
for epoch in range(a_ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
__A = 0
for step, batch in enumerate(a_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__A = model(**a_ )
__A = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
__A = loss / gradient_accumulation_steps
accelerator.backward(a_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(a_ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
__A = model(**a_ )
__A = outputs.logits.argmax(dim=-1 )
__A , __A = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=a_ , references=a_ , )
__A = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , a_ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"accuracy": eval_metric["accuracy"],
"f1": eval_metric["f1"],
"train_loss": total_loss.item() / len(a_ ),
"epoch": epoch,
} , step=a_ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def main():
"""simple docstring"""
    parser = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=a_ , default=a_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=a_ , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
    args = parser.parse_args()
    config = {"lr": 2E-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6}
    training_function(config , args )
if __name__ == "__main__":
main()
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = 'RegNetConfig'
# Base docstring
_CHECKPOINT_FOR_DOC = 'facebook/regnet-y-040'
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'facebook/regnet-y-040'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'
TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'facebook/regnet-y-040',
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Tuple ,A : int ,A : int = 3 ,A : int = 1 ,A : int = 1 ,A : Optional[str] = "relu" ,**A : Dict ,):
super().__init__(**A )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
__A = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
__A = tf.keras.layers.ConvaD(
filters=A ,kernel_size=A ,strides=A ,padding="VALID" ,groups=A ,use_bias=A ,name="convolution" ,)
__A = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name="normalization" )
__A = ACTaFN[activation] if activation is not None else tf.identity
def UpperCamelCase_ ( self : List[Any] ,A : Any ):
__A = self.convolution(self.padding(A ) )
__A = self.normalization(A )
__A = self.activation(A )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Tuple ,A : RegNetConfig ,**A : str ):
super().__init__(**A )
__A = config.num_channels
__A = TFRegNetConvLayer(
out_channels=config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act ,name="embedder" ,)
def UpperCamelCase_ ( self : Tuple ,A : Optional[Any] ):
__A = shape_list(A )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__A = tf.transpose(A ,perm=(0, 2, 3, 1) )
__A = self.embedder(A )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Optional[int] ,A : int ,A : int = 2 ,**A : Tuple ):
super().__init__(**A )
__A = tf.keras.layers.ConvaD(
filters=A ,kernel_size=1 ,strides=A ,use_bias=A ,name="convolution" )
__A = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name="normalization" )
def UpperCamelCase_ ( self : Union[str, Any] ,A : tf.Tensor ,A : bool = False ):
return self.normalization(self.convolution(A ) ,training=A )
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Dict ,A : int ,A : int ,**A : str ):
super().__init__(**A )
__A = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A ,name="pooler" )
__A = [
tf.keras.layers.ConvaD(filters=A ,kernel_size=1 ,activation="relu" ,name="attention.0" ),
tf.keras.layers.ConvaD(filters=A ,kernel_size=1 ,activation="sigmoid" ,name="attention.2" ),
]
def UpperCamelCase_ ( self : Dict ,A : List[Any] ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
__A = self.pooler(A )
for layer_module in self.attention:
__A = layer_module(A )
__A = hidden_state * pooled
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[str] ,A : RegNetConfig ,A : int ,A : int ,A : int = 1 ,**A : Optional[int] ):
super().__init__(**A )
__A = in_channels != out_channels or stride != 1
__A = max(1 ,out_channels // config.groups_width )
__A = (
TFRegNetShortCut(A ,stride=A ,name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" ,name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__A = [
TFRegNetConvLayer(A ,kernel_size=1 ,activation=config.hidden_act ,name="layer.0" ),
TFRegNetConvLayer(
A ,stride=A ,groups=A ,activation=config.hidden_act ,name="layer.1" ),
TFRegNetConvLayer(A ,kernel_size=1 ,activation=A ,name="layer.2" ),
]
__A = ACTaFN[config.hidden_act]
def UpperCamelCase_ ( self : int ,A : Optional[int] ):
__A = hidden_state
for layer_module in self.layers:
__A = layer_module(A )
__A = self.shortcut(A )
hidden_state += residual
__A = self.activation(A )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[Any] ,A : RegNetConfig ,A : int ,A : int ,A : int = 1 ,**A : str ):
super().__init__(**A )
__A = in_channels != out_channels or stride != 1
__A = max(1 ,out_channels // config.groups_width )
__A = (
TFRegNetShortCut(A ,stride=A ,name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" ,name="shortcut" )
)
__A = [
TFRegNetConvLayer(A ,kernel_size=1 ,activation=config.hidden_act ,name="layer.0" ),
TFRegNetConvLayer(
A ,stride=A ,groups=A ,activation=config.hidden_act ,name="layer.1" ),
TFRegNetSELayer(A ,reduced_channels=int(round(in_channels / 4 ) ) ,name="layer.2" ),
TFRegNetConvLayer(A ,kernel_size=1 ,activation=A ,name="layer.3" ),
]
__A = ACTaFN[config.hidden_act]
def UpperCamelCase_ ( self : Dict ,A : Any ):
__A = hidden_state
for layer_module in self.layers:
__A = layer_module(A )
__A = self.shortcut(A )
hidden_state += residual
__A = self.activation(A )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[str] ,A : RegNetConfig ,A : int ,A : int ,A : int = 2 ,A : int = 2 ,**A : Optional[int] ):
super().__init__(**A )
__A = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
__A = [
# downsampling is done in the first layer with stride of 2
layer(A ,A ,A ,stride=A ,name="layers.0" ),
*[layer(A ,A ,A ,name=f'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def UpperCamelCase_ ( self : Any ,A : List[str] ):
for layer_module in self.layers:
__A = layer_module(A )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Any ,A : RegNetConfig ,**A : List[str] ):
super().__init__(**A )
__A = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
A ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,name="stages.0" ,) )
__A = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(A ,config.depths[1:] ) ):
self.stages.append(TFRegNetStage(A ,A ,A ,depth=A ,name=f'''stages.{i+1}''' ) )
def UpperCamelCase_ ( self : List[str] ,A : tf.Tensor ,A : bool = False ,A : bool = True ):
__A = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__A = hidden_states + (hidden_state,)
__A = stage_module(A )
if output_hidden_states:
__A = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=A ,hidden_states=A )
@keras_serializable
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
snake_case_ = RegNetConfig
def __init__( self : int ,A : Optional[int] ,**A : Dict ):
super().__init__(**A )
__A = config
__A = TFRegNetEmbeddings(A ,name="embedder" )
__A = TFRegNetEncoder(A ,name="encoder" )
__A = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A ,name="pooler" )
@unpack_inputs
def UpperCamelCase_ ( self : Tuple ,A : tf.Tensor ,A : Optional[bool] = None ,A : Optional[bool] = None ,A : bool = False ,):
__A = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__A = return_dict if return_dict is not None else self.config.use_return_dict
__A = self.embedder(A ,training=A )
__A = self.encoder(
A ,output_hidden_states=A ,return_dict=A ,training=A )
__A = encoder_outputs[0]
__A = self.pooler(A )
# Change to NCHW output format have uniformity in the modules
__A = tf.transpose(A ,perm=(0, 3, 1, 2) )
__A = tf.transpose(A ,perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__A = tuple([tf.transpose(A ,perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=A ,pooler_output=A ,hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states ,)
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = RegNetConfig
snake_case_ = "regnet"
snake_case_ = "pixel_values"
@property
def UpperCamelCase_ ( self : Optional[Any] ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) ,dtype=tf.floataa )}
SCREAMING_SNAKE_CASE :Dict = R'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
SCREAMING_SNAKE_CASE :Dict = R'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , __SCREAMING_SNAKE_CASE , )
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[Any] ,A : RegNetConfig ,*A : List[Any] ,**A : str ):
super().__init__(A ,*A ,**A )
__A = TFRegNetMainLayer(A ,name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=A ,config_class=_CONFIG_FOR_DOC ,modality="vision" ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def UpperCamelCase_ ( self : Tuple ,A : tf.Tensor ,A : Optional[bool] = None ,A : Optional[bool] = None ,A : int=False ,):
__A = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__A = return_dict if return_dict is not None else self.config.use_return_dict
__A = self.regnet(
pixel_values=A ,output_hidden_states=A ,return_dict=A ,training=A ,)
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state ,pooler_output=outputs.pooler_output ,hidden_states=outputs.hidden_states ,)
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , __SCREAMING_SNAKE_CASE , )
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Optional[int] ,A : RegNetConfig ,*A : str ,**A : Tuple ):
super().__init__(A ,*A ,**A )
__A = config.num_labels
__A = TFRegNetMainLayer(A ,name="regnet" )
# classification head
__A = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels ,name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=A ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def UpperCamelCase_ ( self : List[str] ,A : tf.Tensor = None ,A : tf.Tensor = None ,A : bool = None ,A : bool = None ,A : Union[str, Any]=False ,):
__A = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__A = return_dict if return_dict is not None else self.config.use_return_dict
__A = self.regnet(
A ,output_hidden_states=A ,return_dict=A ,training=A )
__A = outputs.pooler_output if return_dict else outputs[1]
__A = self.classifier[0](A )
__A = self.classifier[1](A )
__A = None if labels is None else self.hf_compute_loss(labels=A ,logits=A )
if not return_dict:
__A = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=A ,logits=A ,hidden_states=outputs.hidden_states )
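# A hedged usage sketch: the class names above are obfuscated, but the
# released transformers API exposes the equivalent head as
# TFRegNetForImageClassification, which is what this assumes:
#
#     from transformers import AutoImageProcessor, TFRegNetForImageClassification
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(images=image, return_tensors="tf")   # `image` is any PIL image
#     logits = model(**inputs).logits                         # (batch, 1000) ImageNet scores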
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class __lowerCAmelCase ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''encodec'''
    def __init__( self , target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0] , sampling_rate=24_000 , audio_channels=1 , normalize=False , chunk_length_s=None , overlap=None , hidden_size=128 , num_filters=32 , num_residual_layers=1 , upsampling_ratios=[8, 5, 4, 2] , norm_type="weight_norm" , kernel_size=7 , last_kernel_size=7 , residual_kernel_size=3 , dilation_growth_rate=2 , use_causal_conv=True , pad_mode="reflect" , compress=2 , num_lstm_layers=2 , trim_right_ratio=1.0 , codebook_size=1_024 , codebook_dim=None , use_conv_shortcut=True , **kwargs , ):
        '''simple docstring'''
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
        super().__init__(**kwargs )
    @property
    def chunk_length( self ) -> Optional[int]:
        '''simple docstring'''
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )
    @property
    def chunk_stride( self ) -> Optional[int]:
        '''simple docstring'''
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
    @property
    def frame_rate( self ) -> int:
        '''simple docstring'''
        hop_length = np.prod(self.upsampling_ratios )
        return math.ceil(self.sampling_rate / hop_length )
    @property
    def num_quantizers( self ) -> int:
        '''simple docstring'''
        return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
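# Quick sanity math on the defaults above (illustrative derivation): the hop
# length is prod([8, 5, 4, 2]) = 320 samples, so frame_rate =
# ceil(24_000 / 320) = 75 frames/s, and with target_bandwidths[-1] = 24.0
# kbps, num_quantizers = int(1_000 * 24.0 // (75 * 10)) = 32.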
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config( model_name ):
    """simple docstring"""
    depths = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
    use_conv_embed = True if 'large' in model_name or 'huge' in model_name else False
    use_post_layernorm = True if 'large' in model_name or 'huge' in model_name else False
    use_layerscale = True if 'large' in model_name or 'huge' in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = 'huggingface/label-files'
    if "large" in model_name or "huge" in model_name:
        filename = 'imagenet-22k-id2label.json'
    else:
        filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim , depths=depths , focal_levels=focal_levels , focal_windows=focal_windows , use_conv_embed=use_conv_embed , id2label=id2label , label2id=label2id , use_post_layernorm=use_post_layernorm , use_layerscale=use_layerscale , )
    return config
def lowerCamelCase_ ( UpperCamelCase__ : Any ) -> str:
"""simple docstring"""
if "patch_embed.proj" in name:
__lowerCamelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__lowerCamelCase = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
__lowerCamelCase = 'encoder.' + name
if "encoder.layers" in name:
__lowerCamelCase = name.replace('encoder.layers' , 'encoder.stages' )
if "downsample.proj" in name:
__lowerCamelCase = name.replace('downsample.proj' , 'downsample.projection' )
if "blocks" in name:
__lowerCamelCase = name.replace('blocks' , 'layers' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
__lowerCamelCase = name.replace('modulation.f' , 'modulation.projection_in' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
__lowerCamelCase = name.replace('modulation.h' , 'modulation.projection_context' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
__lowerCamelCase = name.replace('modulation.proj' , 'modulation.projection_out' )
if name == "norm.weight":
__lowerCamelCase = 'layernorm.weight'
if name == "norm.bias":
__lowerCamelCase = 'layernorm.bias'
if "head" in name:
__lowerCamelCase = name.replace('head' , 'classifier' )
else:
__lowerCamelCase = 'focalnet.' + name
return name
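# Worked example of the rules above: a backbone key such as
# "layers.0.blocks.1.modulation.f.weight" is rewritten step by step:
#   -> "encoder.layers.0.blocks.1.modulation.f.weight"   ("encoder." prefix)
#   -> "encoder.stages.0.blocks.1.modulation.f.weight"   (layers -> stages)
#   -> "encoder.stages.0.layers.1.modulation.f.weight"   (blocks -> layers)
#   -> "encoder.stages.0.layers.1.modulation.projection_in.weight"
#   -> "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"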
def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=False ) -> Dict:
"""simple docstring"""
__lowerCamelCase = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
__lowerCamelCase = model_name_to_url[model_name]
print('Checkpoint URL: ' , UpperCamelCase__ )
__lowerCamelCase = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='cpu' )['model']
# rename keys
for key in state_dict.copy().keys():
__lowerCamelCase = state_dict.pop(UpperCamelCase__ )
__lowerCamelCase = val
__lowerCamelCase = get_focalnet_config(UpperCamelCase__ )
__lowerCamelCase = FocalNetForImageClassification(UpperCamelCase__ )
model.eval()
# load state dict
model.load_state_dict(UpperCamelCase__ )
# verify conversion
__lowerCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowerCamelCase = BitImageProcessor(
do_resize=UpperCamelCase__ , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCamelCase__ , crop_size=224 , do_normalize=UpperCamelCase__ , image_mean=UpperCamelCase__ , image_std=UpperCamelCase__ , )
__lowerCamelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
__lowerCamelCase = processor(images=UpperCamelCase__ , return_tensors='pt' )
__lowerCamelCase = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
] )
__lowerCamelCase = image_transforms(UpperCamelCase__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , UpperCamelCase__ , atol=1E-4 )
__lowerCamelCase = model(**UpperCamelCase__ )
__lowerCamelCase = outputs.logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
print('First values of logits:' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
__lowerCamelCase = torch.tensor([0.21_66, -0.43_68, 0.21_91] )
elif model_name == "focalnet-tiny-lrf":
__lowerCamelCase = torch.tensor([1.16_69, 0.01_25, -0.16_95] )
elif model_name == "focalnet-small":
__lowerCamelCase = torch.tensor([0.49_17, -0.04_30, 0.13_41] )
elif model_name == "focalnet-small-lrf":
__lowerCamelCase = torch.tensor([-0.25_88, -0.53_42, -0.23_31] )
elif model_name == "focalnet-base":
__lowerCamelCase = torch.tensor([-0.16_55, -0.40_90, -0.17_30] )
elif model_name == "focalnet-base-lrf":
__lowerCamelCase = torch.tensor([0.53_06, -0.04_83, -0.39_28] )
assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print(F"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(F"""{model_name}""" )
processor.push_to_hub(F"""{model_name}""" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
__A = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
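# Hedged CLI sketch (script filename and output path are hypothetical):
#   python convert_focalnet_to_hf_format.py \
#       --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny-converted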
| 348 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
A: Any = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : str = ['input_features', 'attention_mask']
def __init__( self , _SCREAMING_SNAKE_CASE=80 , _SCREAMING_SNAKE_CASE=16000 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=25 , _SCREAMING_SNAKE_CASE="hamming_window" , _SCREAMING_SNAKE_CASE=3_2768.0 , _SCREAMING_SNAKE_CASE=0.97 , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ) -> str:
'''simple docstring'''
super().__init__(feature_size=_SCREAMING_SNAKE_CASE , sampling_rate=_SCREAMING_SNAKE_CASE , padding_value=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = feature_size
UpperCAmelCase : str = sampling_rate
UpperCAmelCase : List[str] = padding_value
UpperCAmelCase : Dict = hop_length
UpperCAmelCase : str = win_length
UpperCAmelCase : int = frame_signal_scale
UpperCAmelCase : Tuple = preemphasis_coeff
UpperCAmelCase : Any = mel_floor
UpperCAmelCase : Optional[int] = normalize_means
UpperCAmelCase : Union[str, Any] = normalize_vars
UpperCAmelCase : Optional[int] = win_function
UpperCAmelCase : Optional[int] = return_attention_mask
UpperCAmelCase : Optional[Any] = win_length * sampling_rate // 1000
UpperCAmelCase : List[Any] = hop_length * sampling_rate // 1000
UpperCAmelCase : Optional[int] = optimal_fft_length(self.sample_size )
UpperCAmelCase : List[Any] = (self.n_fft // 2) + 1
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> np.ndarray:
'''simple docstring'''
if self.win_function == "hamming_window":
UpperCAmelCase : str = window_function(window_length=self.sample_size , name=self.win_function , periodic=_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase : Any = window_function(window_length=self.sample_size , name=self.win_function )
UpperCAmelCase : Optional[int] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
UpperCAmelCase : Optional[int] = spectrogram(
one_waveform * self.frame_signal_scale , window=_SCREAMING_SNAKE_CASE , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=_SCREAMING_SNAKE_CASE , preemphasis=self.preemphasis_coeff , mel_filters=_SCREAMING_SNAKE_CASE , mel_floor=self.mel_floor , log_mel="""log""" , )
return msfc_features.T
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
if self.normalize_means:
UpperCAmelCase : str = x[:input_length].mean(axis=0 )
UpperCAmelCase : Optional[int] = np.subtract(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if self.normalize_vars:
UpperCAmelCase : Any = x[:input_length].std(axis=0 )
UpperCAmelCase : List[str] = np.divide(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if input_length < x.shape[0]:
UpperCAmelCase : Dict = padding_value
# make sure array is in float32
UpperCAmelCase : Dict = x.astype(np.floataa )
return x
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[np.ndarray]:
'''simple docstring'''
UpperCAmelCase : List[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.padding_value ) for x, n in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )]
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
F" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
F" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"""It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
UpperCAmelCase : List[str] = isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
UpperCAmelCase : Any = is_batched_numpy or (
isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCAmelCase : str = [np.asarray(_SCREAMING_SNAKE_CASE , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ):
UpperCAmelCase : List[Any] = np.asarray(_SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCAmelCase : List[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCAmelCase : Union[str, Any] = [raw_speech]
# extract fbank features
UpperCAmelCase : Optional[int] = [self._extract_mfsc_features(_SCREAMING_SNAKE_CASE ) for one_waveform in raw_speech]
# convert into correct format for padding
UpperCAmelCase : Optional[Any] = BatchFeature({"""input_features""": features} )
UpperCAmelCase : List[Any] = self.pad(
_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# make sure list is in array format
UpperCAmelCase : Union[str, Any] = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase : Optional[Any] = [np.asarray(_SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features]
UpperCAmelCase : Union[str, Any] = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
UpperCAmelCase : Optional[Any] = [np.asarray(_SCREAMING_SNAKE_CASE , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
UpperCAmelCase : Union[str, Any] = (
np.array(_SCREAMING_SNAKE_CASE , dtype=np.intaa )
if self._get_padding_strategies(_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
UpperCAmelCase : Any = self.normalize(
padded_inputs["""input_features"""] , attention_mask=_SCREAMING_SNAKE_CASE )
if return_tensors is not None:
UpperCAmelCase : Optional[Any] = padded_inputs.convert_to_tensors(_SCREAMING_SNAKE_CASE )
return padded_inputs
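# Minimal usage sketch, assuming the class above is exposed as
# `MCTCTFeatureExtractor` (the mangled name is dataset noise) and using a
# synthetic waveform:
#   import numpy as np
#   extractor = MCTCTFeatureExtractor()
#   waveform = np.zeros(16_000, dtype=np.float32)  # one second at 16 kHz
#   batch = extractor(waveform, sampling_rate=16_000)
#   batch["input_features"][0].shape  # (num_frames, feature_size)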
| 109 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A: Optional[int] = logging.get_logger(__name__)
A: Optional[int] = torch.device("cpu")
def _snake_case ( ):
UpperCAmelCase : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCAmelCase : Tuple = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw )
return im
def _snake_case ( UpperCamelCase : int ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0, 8.8_6_8_5e-0_1, 2.4_3_6_0e-0_1] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_6_3_6e-0_1, 2.3_4_7_8e-0_1, -1.6_9_6_3e0_0, -1.7_3_8_1e0_0, -8.6_3_3_7e-0_1] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_7_6_8e-0_1, -4.7_4_2_9e-0_1, -1.0_8_9_7e0_0, -1.0_2_4_8e0_0, 3.5_5_2_3e-0_2] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_3_3_0e-0_1, 2.4_2_1_1e-0_1, -6.0_1_8_5e-0_1, -8.2_7_8_9e-0_1, -6.0_4_4_6e-0_2] )
def _snake_case ( UpperCamelCase : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : str ):
UpperCAmelCase : int = dct.pop(UpperCamelCase )
UpperCAmelCase : Any = val
def _snake_case ( UpperCamelCase : Union[str, Any] ):
UpperCAmelCase : Optional[int] = []
for k in state_dict.keys():
UpperCAmelCase : Optional[Any] = k
if ".pwconv" in k:
UpperCAmelCase : int = k_new.replace(""".pwconv""" , """.point_wise_conv""" )
if ".dwconv" in k:
UpperCAmelCase : Tuple = k_new.replace(""".dwconv""" , """.depth_wise_conv""" )
if ".Proj." in k:
UpperCAmelCase : List[Any] = k_new.replace(""".Proj.""" , """.proj.""" )
if "patch_embed" in k_new:
UpperCAmelCase : Any = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" )
if "network" in k_new:
UpperCAmelCase : int = k_new.split(""".""" )
if ls[2].isdigit():
UpperCAmelCase : List[Any] = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] )
else:
UpperCAmelCase : Any = k_new.replace("""network""" , """swiftformer.encoder.network""" )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def _snake_case ( UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : Optional[int] ):
UpperCAmelCase : List[Any] = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
UpperCAmelCase : Optional[Any] = 1000
UpperCAmelCase : Tuple = """huggingface/label-files"""
UpperCAmelCase : List[str] = """imagenet-1k-id2label.json"""
UpperCAmelCase : Dict = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="""dataset""" ) , """r""" ) )
UpperCAmelCase : Dict = {int(UpperCamelCase ): v for k, v in idalabel.items()}
UpperCAmelCase : str = idalabel
UpperCAmelCase : Dict = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
UpperCAmelCase : Any = [3, 3, 6, 4]
UpperCAmelCase : List[str] = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
UpperCAmelCase : Dict = [3, 3, 9, 6]
UpperCAmelCase : Union[str, Any] = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
UpperCAmelCase : int = [4, 3, 10, 5]
UpperCAmelCase : Optional[int] = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
UpperCAmelCase : Union[str, Any] = [4, 4, 12, 6]
UpperCAmelCase : List[Any] = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("""https""" ):
UpperCAmelCase : List[str] = torch.hub.load_state_dict_from_url(UpperCamelCase , map_location="""cpu""" , check_hash=UpperCamelCase )
else:
UpperCAmelCase : Any = torch.load(UpperCamelCase , map_location="""cpu""" )
UpperCAmelCase : Optional[Any] = checkpoint
UpperCAmelCase : Dict = create_rename_keys(UpperCamelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# load HuggingFace model
UpperCAmelCase : List[Any] = SwiftFormerForImageClassification(UpperCamelCase ).eval()
hf_model.load_state_dict(UpperCamelCase )
# prepare test inputs
UpperCAmelCase : Dict = prepare_img()
UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained("""preprocessor_config""" )
UpperCAmelCase : Optional[int] = processor(images=UpperCamelCase , return_tensors="""pt""" )
# compare outputs from both models
UpperCAmelCase : Optional[int] = get_expected_output(UpperCamelCase )
UpperCAmelCase : List[str] = hf_model(inputs["""pixel_values"""] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] , UpperCamelCase , atol=1e-3 )
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
print(F"Saving model {swiftformer_name} to {pytorch_dump_folder_path}" )
hf_model.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
A: List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
A: str = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
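# Hedged CLI sketch (the checkpoint path is hypothetical):
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt ./swiftformer_xs.pth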
| 109 | 1 |
from ..utils import DummyObject, requires_backends
class __lowercase ( metaclass=UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Dict = ['''onnx''']
def __init__( self : Tuple , *lowerCAmelCase__ : Optional[int] , **lowerCAmelCase__ : Tuple):
requires_backends(self , ["onnx"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] , *lowerCAmelCase__ : Optional[Any] , **lowerCAmelCase__ : Optional[Any]):
requires_backends(cls , ["onnx"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] , *lowerCAmelCase__ : Optional[int] , **lowerCAmelCase__ : List[Any]):
requires_backends(cls , ["onnx"])
| 127 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Any = ['''pixel_values''']
def __init__( self : List[Any] , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Union[int, float] = 1 / 255 , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : bool = True , **lowerCAmelCase__ : Tuple , ):
super().__init__(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = size if size is not None else {"height": 384, "width": 384}
SCREAMING_SNAKE_CASE_: Union[str, Any] = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = do_resize
SCREAMING_SNAKE_CASE_: Dict = size
SCREAMING_SNAKE_CASE_: int = resample
SCREAMING_SNAKE_CASE_: str = do_rescale
SCREAMING_SNAKE_CASE_: str = rescale_factor
SCREAMING_SNAKE_CASE_: Optional[Any] = do_normalize
SCREAMING_SNAKE_CASE_: Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
SCREAMING_SNAKE_CASE_: List[str] = image_std if image_std is not None else OPENAI_CLIP_STD
SCREAMING_SNAKE_CASE_: Optional[int] = do_convert_rgb
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Tuple , ):
SCREAMING_SNAKE_CASE_: List[Any] = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__)
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
SCREAMING_SNAKE_CASE_: int = (size["height"], size["width"])
return resize(lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Union[int, float] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : List[Any] , ):
return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : List[Any] , ):
return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : ImageInput , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[Dict[str, int]] = None , lowerCAmelCase__ : PILImageResampling = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[float] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase__ : Dict , ):
SCREAMING_SNAKE_CASE_: Any = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_: Optional[int] = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_: Dict = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_: Any = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_: Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_: Optional[Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_: Optional[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_: Optional[int] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
SCREAMING_SNAKE_CASE_: str = size if size is not None else self.size
SCREAMING_SNAKE_CASE_: List[Any] = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = make_list_of_images(lowerCAmelCase__)
if not valid_images(lowerCAmelCase__):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True.")
# PIL RGBA images are converted to RGB
if do_convert_rgb:
SCREAMING_SNAKE_CASE_: List[Any] = [convert_to_rgb(lowerCAmelCase__) for image in images]
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_: List[Any] = [to_numpy_array(lowerCAmelCase__) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_: int = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_: Optional[Any] = [self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_: List[str] = [self.normalize(image=lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__) for image in images]
SCREAMING_SNAKE_CASE_: str = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__) for image in images]
SCREAMING_SNAKE_CASE_: List[str] = BatchFeature(data={"pixel_values": images} , tensor_type=lowerCAmelCase__)
return encoded_outputs
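# Minimal usage sketch, assuming the class above is exposed as
# `BlipImageProcessor` (the mangled name is dataset noise):
#   from PIL import Image
#   processor = BlipImageProcessor()
#   image = Image.new("RGB", (500, 400))
#   out = processor(images=image, return_tensors="np")
#   out["pixel_values"].shape  # (1, 3, 384, 384) with the default size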
| 127 | 1 |
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def lowercase_ ( _lowerCamelCase: Optional[Any] ) -> List[str]:
'''simple docstring'''
__lowerCamelCase : Tuple = args.pruning_method
__lowerCamelCase : int = args.threshold
__lowerCamelCase : List[str] = args.model_name_or_path.rstrip("/" )
__lowerCamelCase : Union[str, Any] = args.target_model_path
print(F"""Load fine-pruned model from {model_name_or_path}""" )
__lowerCamelCase : List[Any] = torch.load(os.path.join(_lowerCamelCase , "pytorch_model.bin" ) )
__lowerCamelCase : Optional[Any] = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
__lowerCamelCase : List[Any] = tensor
print(F"""Copied layer {name}""" )
elif "classifier" in name or "qa_output" in name:
__lowerCamelCase : Any = tensor
print(F"""Copied layer {name}""" )
elif "bias" in name:
__lowerCamelCase : Optional[Any] = tensor
print(F"""Copied layer {name}""" )
else:
if pruning_method == "magnitude":
__lowerCamelCase : Dict = MagnitudeBinarizer.apply(inputs=_lowerCamelCase , threshold=_lowerCamelCase )
__lowerCamelCase : Optional[int] = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
__lowerCamelCase : Dict = name[:-6]
__lowerCamelCase : int = model[F"""{prefix_}mask_scores"""]
__lowerCamelCase : Union[str, Any] = TopKBinarizer.apply(_lowerCamelCase , _lowerCamelCase )
__lowerCamelCase : Union[str, Any] = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
__lowerCamelCase : str = name[:-6]
__lowerCamelCase : List[Any] = model[F"""{prefix_}mask_scores"""]
__lowerCamelCase : str = ThresholdBinarizer.apply(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__lowerCamelCase : Any = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
__lowerCamelCase : int = name[:-6]
__lowerCamelCase : List[str] = model[F"""{prefix_}mask_scores"""]
__lowerCamelCase , __lowerCamelCase : Any = -0.1, 1.1
__lowerCamelCase : List[str] = torch.sigmoid(_lowerCamelCase )
__lowerCamelCase : List[Any] = s * (r - l) + l
__lowerCamelCase : List[str] = s_bar.clamp(min=0.0 , max=1.0 )
__lowerCamelCase : Any = tensor * mask
print(F"""Pruned layer {name}""" )
else:
raise ValueError("Unknown pruning method" )
if target_model_path is None:
__lowerCamelCase : str = os.path.join(
os.path.dirname(_lowerCamelCase ) , F"""bertarized_{os.path.basename(_lowerCamelCase )}""" )
if not os.path.isdir(_lowerCamelCase ):
shutil.copytree(_lowerCamelCase , _lowerCamelCase )
print(F"""\nCreated folder {target_model_path}""" )
torch.save(_lowerCamelCase , os.path.join(_lowerCamelCase , "pytorch_model.bin" ) )
print("\nPruned model saved! See you later!" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
__A = parser.parse_args()
    main(args)
| 135 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class _snake_case ( a__ ):
snake_case__ = 42
class _snake_case ( a__ , a__ ):
@register_to_config
def __init__( self : List[Any] , UpperCAmelCase : int = 3 , UpperCAmelCase : int = 3 , UpperCAmelCase : Tuple[str] = ("DownEncoderBlock2D",) , UpperCAmelCase : Tuple[str] = ("UpDecoderBlock2D",) , UpperCAmelCase : Tuple[int] = (64,) , UpperCAmelCase : int = 1 , UpperCAmelCase : str = "silu" , UpperCAmelCase : int = 3 , UpperCAmelCase : int = 32 , UpperCAmelCase : int = 256 , UpperCAmelCase : int = 32 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : float = 0.1_8_2_1_5 , UpperCAmelCase : str = "group" , ):
super().__init__()
# pass init params to Encoder
__lowerCamelCase : Tuple = Encoder(
in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , down_block_types=UpperCAmelCase , block_out_channels=UpperCAmelCase , layers_per_block=UpperCAmelCase , act_fn=UpperCAmelCase , norm_num_groups=UpperCAmelCase , double_z=UpperCAmelCase , )
__lowerCamelCase : Optional[Any] = vq_embed_dim if vq_embed_dim is not None else latent_channels
__lowerCamelCase : List[str] = nn.Convad(UpperCAmelCase , UpperCAmelCase , 1 )
__lowerCamelCase : List[Any] = VectorQuantizer(UpperCAmelCase , UpperCAmelCase , beta=0.2_5 , remap=UpperCAmelCase , sane_index_shape=UpperCAmelCase )
__lowerCamelCase : Optional[Any] = nn.Convad(UpperCAmelCase , UpperCAmelCase , 1 )
# pass init params to Decoder
__lowerCamelCase : int = Decoder(
in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , up_block_types=UpperCAmelCase , block_out_channels=UpperCAmelCase , layers_per_block=UpperCAmelCase , act_fn=UpperCAmelCase , norm_num_groups=UpperCAmelCase , norm_type=UpperCAmelCase , )
@apply_forward_hook
def lowerCamelCase__ ( self : Optional[int] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : bool = True ):
__lowerCamelCase : List[Any] = self.encoder(UpperCAmelCase )
__lowerCamelCase : int = self.quant_conv(UpperCAmelCase )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=UpperCAmelCase )
@apply_forward_hook
def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True ):
# also go through quantization layer
if not force_not_quantize:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Tuple = self.quantize(UpperCAmelCase )
else:
__lowerCamelCase : Optional[Any] = h
__lowerCamelCase : int = self.post_quant_conv(UpperCAmelCase )
__lowerCamelCase : str = self.decoder(UpperCAmelCase , quant if self.config.norm_type == "spatial" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCAmelCase )
def lowerCamelCase__ ( self : Optional[int] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : bool = True ):
__lowerCamelCase : Dict = sample
__lowerCamelCase : Optional[Any] = self.encode(UpperCAmelCase ).latents
__lowerCamelCase : Optional[Any] = self.decode(UpperCAmelCase ).sample
if not return_dict:
return (dec,)
        return DecoderOutput(sample=UpperCAmelCase )
| 135 | 1 |
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError("""Number should not be negative.""")
    return 1 if num in (0, 1) else num * factorial(num - 1)
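# Quick check of the memoized recursion above:
#   factorial(5)  # 120
#   factorial(0)  # 1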
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 232 |
def max_product_subarray(numbers: list[int]) -> int:
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("""numbers must be an iterable of integers""")
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            min_till_now, max_till_now = max_till_now, min_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
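# Quick check of the sign-swap trick above (a negative value exchanges the
# running min and max so a later negative can restore a large product):
#   max_product_subarray([2, 3, -2, 4])       # 6
#   max_product_subarray([-2, 0, -1])         # 0
#   max_product_subarray([2, -5, -2, -4, 3])  # 24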
| 232 | 1 |
import mpmath # for roots of unity
import numpy as np
class _lowercase :
'''simple docstring'''
def __init__( self , snake_case__=None , snake_case__=None ):
'''simple docstring'''
UpperCamelCase_ = list(poly_a or [0] )[:]
UpperCamelCase_ = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
UpperCamelCase_ = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
UpperCamelCase_ = len(self.polyB )
# Add 0 to make lengths equal a power of 2
UpperCamelCase_ = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
UpperCamelCase_ = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
UpperCamelCase_ = self.__multiply()
def _lowerCamelCase ( self , snake_case__ ):
'''simple docstring'''
UpperCamelCase_ = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
# Corner case
if len(snake_case__ ) <= 1:
return dft[0]
#
UpperCamelCase_ = self.c_max_length // 2
while next_ncol > 0:
UpperCamelCase_ = [[] for i in range(snake_case__ )]
UpperCamelCase_ = self.root**next_ncol
# First half of next step
UpperCamelCase_ = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(snake_case__ ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
UpperCamelCase_ = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(snake_case__ ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
UpperCamelCase_ = new_dft
UpperCamelCase_ = next_ncol // 2
return dft[0]
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.__dft("A" )
UpperCamelCase_ = self.__dft("B" )
UpperCamelCase_ = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
UpperCamelCase_ = 2
while next_ncol <= self.c_max_length:
UpperCamelCase_ = [[] for i in range(snake_case__ )]
UpperCamelCase_ = self.root ** (next_ncol // 2)
UpperCamelCase_ = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
UpperCamelCase_ = new_inverse_c
next_ncol *= 2
# Unpack
UpperCamelCase_ = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self ):
'''simple docstring'''
UpperCamelCase_ = "A = " + " + ".join(
F"""{coef}*x^{i}""" for coef, i in enumerate(self.polyA[: self.len_A] ) )
UpperCamelCase_ = "B = " + " + ".join(
F"""{coef}*x^{i}""" for coef, i in enumerate(self.polyB[: self.len_B] ) )
UpperCamelCase_ = "A*B = " + " + ".join(
F"""{coef}*x^{i}""" for coef, i in enumerate(self.product ) )
return F"""{a}\n{b}\n{c}"""
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 128 |
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"""1/{temp + 1}""" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("""Enter the last number (nth term) of the Harmonic Series""")
    print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
    print(harmonic_series(nth_term))
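# Quick check:
#   harmonic_series("5")  # ['1', '1/2', '1/3', '1/4', '1/5']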
| 128 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase__ :
def __init__( self : Optional[int] , snake_case__ : int , snake_case__ : str=3 , snake_case__ : List[str]=3_2 , snake_case__ : int=3 , snake_case__ : Dict=1_0 , snake_case__ : str=[1_0, 2_0, 3_0, 4_0] , snake_case__ : Optional[Any]=[1, 1, 2, 1] , snake_case__ : List[str]=True , snake_case__ : Dict=True , snake_case__ : Optional[int]="relu" , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=None , ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = parent
UpperCAmelCase__ : str = batch_size
UpperCAmelCase__ : Union[str, Any] = image_size
UpperCAmelCase__ : Tuple = num_channels
UpperCAmelCase__ : List[Any] = embeddings_size
UpperCAmelCase__ : Union[str, Any] = hidden_sizes
UpperCAmelCase__ : Any = depths
UpperCAmelCase__ : Tuple = is_training
UpperCAmelCase__ : Optional[Any] = use_labels
UpperCAmelCase__ : List[Any] = hidden_act
UpperCAmelCase__ : List[str] = num_labels
UpperCAmelCase__ : Any = scope
UpperCAmelCase__ : Any = len(snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Dict = None
if self.use_labels:
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def __a ( self : Union[str, Any] ):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def __a ( self : Any , snake_case__ : Tuple , snake_case__ : int , snake_case__ : int ):
'''simple docstring'''
UpperCAmelCase__ : str = RegNetModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Optional[Any] = model(snake_case__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def __a ( self : Union[str, Any] , snake_case__ : str , snake_case__ : Tuple , snake_case__ : Any ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.num_labels
UpperCAmelCase__ : List[Any] = RegNetForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : List[str] = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = config_and_inputs
UpperCAmelCase__ : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =(RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ =(
{'''feature-extraction''': RegNetModel, '''image-classification''': RegNetForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Dict = RegNetModelTester(self )
UpperCAmelCase__ : Dict = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
def __a ( self : Any ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __a ( self : Any ):
'''simple docstring'''
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def __a ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def __a ( self : str ):
'''simple docstring'''
pass
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(snake_case__ )
UpperCAmelCase__ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : List[str] = [*signature.parameters.keys()]
UpperCAmelCase__ : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Union[str, Any] = model_class(config=snake_case__ )
for name, module in model.named_modules():
if isinstance(snake_case__ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
def __a ( self : Optional[int] ):
'''simple docstring'''
def check_hidden_states_output(snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] ):
UpperCAmelCase__ : Tuple = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
UpperCAmelCase__ : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase__ : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(snake_case__ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Union[str, Any] = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase__ : Dict = layer_type
UpperCAmelCase__ : Tuple = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : Dict = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
@slow
def __a ( self : str ):
'''simple docstring'''
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Optional[int] = RegNetModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def SCREAMING_SNAKE_CASE__ ( )-> List[str]:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def __a ( self : List[Any] ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(snake_case__ )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : Any = prepare_img()
UpperCAmelCase__ : Union[str, Any] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Dict = model(**snake_case__ )
# verify the logits
UpperCAmelCase__ : Dict = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case__ )
UpperCAmelCase__ : Dict = torch.tensor([-0.4180, -1.5051, -3.4836] ).to(snake_case__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 ) )
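# Hedged note: this mirrors the transformers RegNet test suite and is meant to
# be collected by pytest from the usual tests tree, e.g. (path hypothetical):
#   pytest tests/models/regnet/test_modeling_regnet.py -k "image_classification"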
| 298 |
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class lowerCAmelCase__ :
def __init__( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : str=sys.maxsize ):
'''simple docstring'''
UpperCAmelCase__ : Any = "bilinear"
UpperCAmelCase__ : Any = max_size
UpperCAmelCase__ : Any = short_edge_length
def __call__( self : Dict , snake_case__ : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = []
for img in imgs:
UpperCAmelCase__ , UpperCAmelCase__ : int = img.shape[:2]
# later: provide list and randomly choose index for resize
UpperCAmelCase__ : Dict = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
UpperCAmelCase__ : Dict = size * 1.0 / min(snake_case__ , snake_case__ )
if h < w:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = size, scale * w
else:
UpperCAmelCase__ , UpperCAmelCase__ : int = scale * h, size
if max(snake_case__ , snake_case__ ) > self.max_size:
UpperCAmelCase__ : Union[str, Any] = self.max_size * 1.0 / max(snake_case__ , snake_case__ )
UpperCAmelCase__ : List[str] = newh * scale
UpperCAmelCase__ : int = neww * scale
UpperCAmelCase__ : List[Any] = int(neww + 0.5 )
UpperCAmelCase__ : Optional[Any] = int(newh + 0.5 )
if img.dtype == np.uinta:
UpperCAmelCase__ : Any = Image.fromarray(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
UpperCAmelCase__ : Optional[int] = np.asarray(snake_case__ )
else:
UpperCAmelCase__ : Any = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
UpperCAmelCase__ : Tuple = nn.functional.interpolate(
snake_case__ , (newh, neww) , mode=self.interp_method , align_corners=snake_case__ ).squeeze(0 )
img_augs.append(snake_case__ )
return img_augs
class lowerCAmelCase__ :
def __init__( self : Optional[int] , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Dict = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
UpperCAmelCase__ : Any = cfg.INPUT.FORMAT
UpperCAmelCase__ : Optional[Any] = cfg.SIZE_DIVISIBILITY
UpperCAmelCase__ : str = cfg.PAD_VALUE
UpperCAmelCase__ : List[Any] = cfg.INPUT.MAX_SIZE_TEST
UpperCAmelCase__ : Dict = cfg.MODEL.DEVICE
UpperCAmelCase__ : Optional[int] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
UpperCAmelCase__ : str = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
UpperCAmelCase__ : List[str] = lambda snake_case__ : (x - self.pixel_mean) / self.pixel_std
def __a ( self : Optional[int] , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = tuple(max(snake_case__ ) for s in zip(*[img.shape for img in images] ) )
UpperCAmelCase__ : Tuple = [im.shape[-2:] for im in images]
UpperCAmelCase__ : int = [
nn.functional.pad(
snake_case__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(snake_case__ , snake_case__ )
]
return torch.stack(snake_case__ ), torch.tensor(snake_case__ )
def __call__( self : str , snake_case__ : int , snake_case__ : int=False ):
'''simple docstring'''
with torch.no_grad():
if not isinstance(snake_case__ , snake_case__ ):
UpperCAmelCase__ : Dict = [images]
if single_image:
assert len(snake_case__ ) == 1
for i in range(len(snake_case__ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(snake_case__ , images.pop(snake_case__ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
snake_case__ , torch.as_tensor(img_tensorize(images.pop(snake_case__ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
UpperCAmelCase__ : Optional[Any] = torch.tensor([im.shape[:2] for im in images] )
UpperCAmelCase__ : Tuple = self.aug(snake_case__ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
UpperCAmelCase__ : Optional[int] = [self.normalizer(snake_case__ ) for x in images]
# now pad them to do the following operations
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.pad(snake_case__ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
UpperCAmelCase__ : Tuple = torch.true_divide(snake_case__ , snake_case__ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : str )-> List[Any]:
'''simple docstring'''
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple[int, int] )-> int:
'''simple docstring'''
assert torch.isfinite(snake_case ).all(), "Box tensor contains infinite or NaN!"
UpperCAmelCase__ , UpperCAmelCase__ : Dict = box_size
tensor[:, 0].clamp_(min=0 , max=snake_case )
tensor[:, 1].clamp_(min=0 , max=snake_case )
tensor[:, 2].clamp_(min=0 , max=snake_case )
tensor[:, 3].clamp_(min=0 , max=snake_case )
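# Worked example of the shortest-edge resize above (numbers illustrative):
# for a 480 x 640 (h x w) image with size=800 and max_size=1333,
# scale = 800 / 480 ~= 1.667, giving (newh, neww) ~= (800, 1067);
# max(800, 1067) <= 1333, so no extra down-scaling is applied.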
| 298 | 1 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar('KEY')
VAL = TypeVar('VAL')
@dataclass(frozen=SCREAMING_SNAKE_CASE__ , slots=SCREAMING_SNAKE_CASE__ )
class UpperCAmelCase ( Generic[KEY, VAL] ):
'''simple docstring'''
__UpperCamelCase : KEY
__UpperCamelCase : VAL
class UpperCAmelCase ( _Item ):
'''simple docstring'''
def __init__( self : Optional[int] ):
"""simple docstring"""
        super().__init__(None , None )
def __bool__( self : List[str] ):
"""simple docstring"""
return False
_deleted = _DeletedItem()
class UpperCAmelCase ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__( self : List[str] , lowerCAmelCase_ : int = 8 , lowerCAmelCase_ : float = 0.75 ):
"""simple docstring"""
_A: Tuple = initial_block_size
_A: list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
_A: Optional[Any] = capacity_factor
_A: Optional[Any] = 0
def __magic_name__ ( self : Optional[int] , lowerCAmelCase_ : KEY ):
"""simple docstring"""
return hash(lowerCAmelCase_ ) % len(self._buckets )
def __magic_name__ ( self : Dict , lowerCAmelCase_ : int ):
"""simple docstring"""
return (ind + 1) % len(self._buckets )
def __magic_name__ ( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : KEY , lowerCAmelCase_ : VAL ):
"""simple docstring"""
_A: Optional[int] = self._buckets[ind]
if not stored:
_A: Tuple = _Item(lowerCAmelCase_ , lowerCAmelCase_ )
self._len += 1
return True
elif stored.key == key:
_A: Optional[int] = _Item(lowerCAmelCase_ , lowerCAmelCase_ )
return True
else:
return False
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
_A: Tuple = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowerCAmelCase_ )
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
if len(self._buckets ) <= self._initial_block_size:
return False
_A: List[Any] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def __magic_name__ ( self : Optional[int] , lowerCAmelCase_ : int ):
"""simple docstring"""
_A: Optional[int] = self._buckets
_A: Dict = [None] * new_size
_A: Optional[Any] = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
self._resize(len(self._buckets ) * 2 )
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
self._resize(len(self._buckets ) // 2 )
def __magic_name__ ( self : str , lowerCAmelCase_ : KEY ):
"""simple docstring"""
_A: List[str] = self._get_bucket_index(lowerCAmelCase_ )
for _ in range(len(self._buckets ) ):
yield ind
_A: List[Any] = self._get_next_ind(lowerCAmelCase_ )
def __magic_name__ ( self : int , lowerCAmelCase_ : KEY , lowerCAmelCase_ : VAL ):
"""simple docstring"""
for ind in self._iterate_buckets(lowerCAmelCase_ ):
if self._try_set(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
break
def __setitem__( self : List[str] , lowerCAmelCase_ : KEY , lowerCAmelCase_ : VAL ):
"""simple docstring"""
if self._is_full():
self._size_up()
self._add_item(lowerCAmelCase_ , lowerCAmelCase_ )
def __delitem__( self : Union[str, Any] , lowerCAmelCase_ : KEY ):
"""simple docstring"""
for ind in self._iterate_buckets(lowerCAmelCase_ ):
_A: List[Any] = self._buckets[ind]
if item is None:
raise KeyError(lowerCAmelCase_ )
if item is _deleted:
continue
if item.key == key:
_A: Optional[int] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : List[str] , lowerCAmelCase_ : KEY ):
"""simple docstring"""
for ind in self._iterate_buckets(lowerCAmelCase_ ):
_A: Tuple = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCAmelCase_ )
def __len__( self : Union[str, Any] ):
"""simple docstring"""
return self._len
def __iter__( self : Dict ):
"""simple docstring"""
yield from (item.key for item in self._buckets if item)
def __repr__( self : List[Any] ):
"""simple docstring"""
_A: Optional[Any] = ''' ,'''.join(
F"""{item.key}: {item.val}""" for item in self._buckets if item )
return F"""HashMap({val_string})"""
| 121 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
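
# Minimal usage sketch (added; not part of the original file): `column_mapping`
# maps the (possibly renamed) dataset columns back to the canonical schema.
# task = QuestionAnsweringExtractive(question_column="query")
# task.column_mapping  # {"query": "question", "context": "context", "answers": "answers"}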
| 121 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}


if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 195 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 195 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
| 86 |
def to_upper_case(word: str) -> str:
    """Convert all lowercase ASCII letters in `word` to uppercase.

    >>> to_upper_case("wow")
    'WOW'
    >>> to_upper_case("Hello")
    'HELLO'
    """
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 253 | 0 |
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes: return every prime below `limit`."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):  # only check odd numbers up to sqrt(limit)
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1000000) -> int:
    """Project Euler 50: find the prime below `ceiling` that can be written as the
    sum of the most consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in primes:
                length = j - i
                largest = sol

    return largest
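
# Minimal sketch of expected behaviour (added; not from the original file):
# prime_sieve(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]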
if __name__ == "__main__":
print(F"""{solution() = }""")
| 319 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None


logger = logging.get_logger(__name__)


SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}


class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are [1] if a token is special and [0] otherwise."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
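
# Minimal usage sketch (added; not part of the original file): PEGASUS only appends
# EOS to a single sequence, so [5, 6, 7] becomes [5, 6, 7, eos_token_id].
# tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
# tok.build_inputs_with_special_tokens([5, 6, 7])  # -> [5, 6, 7, 1]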
| 319 | 1 |
def get_set_bits_count(number: int) -> int:
    """Count the set bits (1s) in a non-negative integer using
    Brian Kernighan's algorithm.

    >>> get_set_bits_count(25)
    3
    >>> get_set_bits_count(37)
    3
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
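
# Worked example (added for illustration): 25 = 0b11001.
#   25 & 24 = 0b11001 & 0b11000 = 0b11000 (24)
#   24 & 23 = 0b11000 & 0b10111 = 0b10000 (16)
#   16 & 15 = 0b10000 & 0b01111 = 0b00000 (0)  -> three iterations, three set bits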
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrapper around `tqdm.tqdm` that can restrict the progress bar to the main process."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
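
# Hedged usage sketch (added; not in the original file): in a distributed run only
# local rank 0 renders the bar; the other ranks get a disabled tqdm instance.
# for batch in tqdm(True, dataloader, desc="training"):
#     ...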
| 121 | 0 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a__: Dict = logging.get_logger(__name__)
a__: Tuple = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
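
# Brief usage sketch (added; not in the original file): `attribute_map` lets the
# generic config names resolve to DETR-specific ones.
# config = DetrConfig()
# assert config.hidden_size == config.d_model == 256
# assert config.num_attention_heads == config.encoder_attention_heads == 8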
| 39 |
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}

speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    """Convert `speed` between km/h, m/s, mph and knot.

    >>> convert_speed(100, "km/h", "m/s")
    27.778
    """
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
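
# Worked example (added for illustration): 1 knot is 1.852 km/h, so
# convert_speed(1, "knot", "km/h") == round(1 * 1.852 * 1.0, 3) == 1.852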
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39 | 1 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
{'sequence': 'My name is grouped', 'score': 2.1e-05, 'token': 3_8015, 'token_str': ' grouped'},
{'sequence': 'My name is accuser', 'score': 2.1e-05, 'token': 2_5506, 'token_str': ' accuser'},
] , )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
{
'sequence': 'The largest city in France is grouped',
'score': 2.1e-05,
'token': 3_8015,
'token_str': ' grouped',
},
{
'sequence': 'The largest city in France is accuser',
'score': 2.1e-05,
'token': 2_5506,
'token_str': ' accuser',
},
] , )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 1_3606, 'token_str': ' Clara'},
{'sequence': 'My name is Patrick', 'score': 2e-05, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 1.9e-05, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
{'sequence': 'My name is Maul', 'score': 2.2e-05, 'token': 3_5676, 'token_str': ' Maul'},
{'sequence': 'My name isELS', 'score': 2.2e-05, 'token': 1_6416, 'token_str': 'ELS'},
] , )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
{
'sequence': 'The largest city in France is Maul',
'score': 2.2e-05,
'token': 3_5676,
'token_str': ' Maul',
},
{'sequence': 'The largest city in France isELS', 'score': 2.2e-05, 'token': 1_6416, 'token_str': 'ELS'},
] , )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
{'sequence': 'My name is Patrick', 'score': 2.1e-05, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 2e-05, 'token': 2941, 'token_str': ' Te'},
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 1_3606, 'token_str': ' Clara'},
] , )
        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
[
{
'score': 2.2e-05,
'token': 3_5676,
'token_str': ' Maul',
'sequence': '<s>My name is Maul<mask></s>',
},
{'score': 2.2e-05, 'token': 1_6416, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
],
[
{
'score': 2.2e-05,
'token': 3_5676,
'token_str': ' Maul',
'sequence': '<s>My name is<mask> Maul</s>',
},
{'score': 2.2e-05, 'token': 1_6416, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
],
] , )
@require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
        self.assertIsInstance(response, list)
@slow
@require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)
@slow
@require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
{'sequence': 'My name is John', 'score': 0.008, 'token': 610, 'token_str': ' John'},
{'sequence': 'My name is Chris', 'score': 0.007, 'token': 1573, 'token_str': ' Chris'},
] , )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
{
'sequence': 'The largest city in France is Paris',
'score': 0.251,
'token': 2201,
'token_str': ' Paris',
},
{
'sequence': 'The largest city in France is Lyon',
'score': 0.214,
'token': 1_2790,
'token_str': ' Lyon',
},
] , )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
{'sequence': 'My name is Patrick', 'score': 0.005, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Clara', 'score': 0.000, 'token': 1_3606, 'token_str': ' Clara'},
{'sequence': 'My name is Te', 'score': 0.000, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
@require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
| 218 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 218 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Prepare an image of two cats that the converted model is verified on."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm ViT checkpoint into the HuggingFace ViT structure."""
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
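
# Example invocation (illustrative; the script filename is an assumption):
#   python convert_vit_timm_to_pytorch.py --vit_name vit_base_patch16_224 --pytorch_dump_folder_path ./vit-base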
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_snake_case = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 300 |
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the root-mean-square speed, in m/s, of a molecule with the given
    molar mass (kg/mol) at the given temperature (K): v_rms = sqrt(3RT / M)."""
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
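    # Note (added for illustration): the formula expects molar mass in kg/mol, so for
    # nitrogen (0.028 kg/mol) the physically meaningful call is
    # rms_speed_of_molecule(300, 0.028), which gives roughly 516.96 m/s.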
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
| 300 | 1 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : List[str] = logging.get_logger(__name__)
__UpperCamelCase : str = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
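
# Small usage sketch (added; not in the original file): with all feature counts at
# their defaults, the per-step model input is input_size * len(lags_sequence) plus
# the extra static/dynamic features.
# config = InformerConfig(prediction_length=24)
# config.feature_size  # 1 * 7 + (0 + 0 + 0 + 0 + 2) = 9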
| 307 |
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """Store two discrete signals and perform their circular convolution."""

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        """Convolve `first_signal` and `second_signal` using the matrix method."""
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)

        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
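
# Worked example (added for illustration): with the default signals
# [2, 1, 2, -1] and [1, 2, 3, 4], the circular convolution is
# CircularConvolution().circular_convolution() -> [10, 10, 6, 14]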
if __name__ == "__main__":
doctest.testmod()
| 307 | 1 |
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum of the first `n`
    natural numbers and the sum of their squares (Project Euler 6).

    >>> solution(10)
    2640
    """
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
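
# Worked example (added): for n = 10 the sum is 55, so the square of the sum is
# 3025; the sum of the squares is 385; the difference is 3025 - 385 = 2640.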
if __name__ == "__main__":
print(f'''{solution() = }''')
| 124 |
def generate_large_matrix() -> list[list[int]]:
    """Generate a 1000 x 1000 grid sorted in decreasing order along rows and columns."""
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]
grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    """Binary search: return the index of the first negative number in a
    decreasingly sorted array, or len(array) if there is none."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
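
# Illustrative example (added): in the decreasing row [4, 3, -1, -2] the first
# negative value sits at index 2, so find_negative_index([4, 3, -1, -2]) == 2.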
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives with one binary search per row, shrinking the search bound
    because the grid is also sorted column-wise."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by checking every number in the grid."""
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Brute force, but stop scanning a row at its first negative number."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    """Benchmark the three counting functions against the large grid."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 124 | 1 |
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
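
# Quick trace (added for illustration): 25 -> 24 -> 16 -> 0 clears one set bit per
# step, so get_set_bits_count_using_brian_kernighans_algorithm(25) == 3.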
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by testing the lowest bit and shifting right."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark() -> None:
    """Benchmark both counting functions with inputs of different bit lengths."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 43 |
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE :List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :int = {'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE :Union[str, Any] = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
SCREAMING_SNAKE_CASE :int = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["input_ids", "attention_mask"]
snake_case_ = []
def __init__( self : Any ,A : List[str] ,A : str="<unk>" ,A : int="<s>" ,A : Union[str, Any]="</s>" ,A : List[str]="<pad>" ,A : int="[SEP]" ,A : Optional[Any]="[MASK]" ,A : Tuple="[CLS]" ,A : Optional[Dict[str, Any]] = None ,**A : Any ,):
__A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else bos_token
__A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else eos_token
__A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else unk_token
__A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else pad_token
__A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else cls_token
__A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
__A = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else mask_token
__A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A ,eos_token=A ,unk_token=A ,pad_token=A ,sep_token=A ,mask_token=A ,cls_token=A ,sp_model_kwargs=self.sp_model_kwargs ,**A ,)
__A = vocab_file
__A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
@property
def UpperCamelCase_ ( self : List[str] ):
return self.sp_model.get_piece_size()
def UpperCamelCase_ ( self : Optional[Any] ):
__A = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ):
__A = self.__dict__.copy()
__A = None
return state
def __setstate__( self : str ,A : Optional[Any] ):
__A = d
# for backward compatibility
if not hasattr(self ,"sp_model_kwargs" ):
__A = {}
__A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase_ ( self : Any ,A : str ):
return self.sp_model.encode(A ,out_type=A )
def UpperCamelCase_ ( self : List[str] ,A : Tuple ):
return self.sp_model.piece_to_id(A )
def UpperCamelCase_ ( self : List[Any] ,A : Tuple ):
__A = self.sp_model.IdToPiece(A )
return token
def UpperCamelCase_ ( self : List[Any] ,A : int ):
__A = []
__A = ""
__A = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A ) + token
__A = True
__A = []
else:
current_sub_tokens.append(A )
__A = False
out_string += self.sp_model.decode(A )
return out_string.strip()
def UpperCamelCase_ ( self : Tuple ,A : List[int] ,A : bool = False ,A : bool = None ,A : bool = True ,**A : Union[str, Any] ,):
__A = kwargs.pop("use_source_tokenizer" ,A )
__A = self.convert_ids_to_tokens(A ,skip_special_tokens=A )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
__A = []
__A = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(A ) )
__A = []
sub_texts.append(A )
else:
current_sub_text.append(A )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(A ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
__A = re.sub(R" (\[(MASK|SEP)\])" ,R"\1" ," ".join(A ) )
else:
__A = "".join(A )
__A = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
__A = self.clean_up_tokenization(A )
return clean_text
else:
return text
def UpperCamelCase_ ( self : str ,A : str ,A : Optional[str] = None ):
if not os.path.isdir(A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__A = os.path.join(
A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,A )
elif not os.path.isfile(self.vocab_file ):
with open(A ,"wb" ) as fi:
__A = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
def UpperCamelCase_ ( self : Dict ,A : List[int] ,A : Optional[List[int]] = None ):
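        # Single sequence: [CLS] X [SEP]; pair of sequences: [CLS] A [SEP] B [SEP]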
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__A = [self.cls_token_id]
__A = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase_ ( self : Optional[int] ,A : List[int] ,A : Optional[List[int]] = None ,A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A ,token_ids_a=A ,already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1]
def UpperCamelCase_ ( self : Any ,A : List[int] ,A : Optional[List[int]] = None ):
__A = [self.sep_token_id]
__A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
| 15 | 0 |
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm( equation ):
    """Evaluate a fully parenthesized, single-digit infix equation with Dijkstra's two-stack algorithm."""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num_a = operand_stack.peek()
            operand_stack.pop()
            num_b = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num_b , num_a )
            operand_stack.push(total )
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
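    # Worked trace for the default equation: (4 * 2) -> 8, (2 + 3) -> 5, (8 * 5) -> 40, (5 + 40) -> 45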
| 356 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch( checkpoint_repo: str , pytorch_dump_folder_path: str ) -> None:
    """Copy/paste/tweak the original checkpoint's weights into the transformers model structure."""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo , architectures=["RobertaPreLayerNormForMaskedLM"] )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo , filename="pytorch_model.bin" ) )
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta." ):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta." ) :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None , config=config , state_dict=state_dict )
    model.save_pretrained(pytorch_dump_folder_path )
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint-repo",
default=None,
type=str,
required=True,
        help="Path to the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
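    # Example invocation (repo name taken from the help text above; script/output names are illustrative):
    #   python convert_roberta_prelayernorm_checkpoint.py \
    #       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
    #       --pytorch_dump_folder_path ./roberta-prelayernorm-converted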
| 137 | 0 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""facebook/blenderbot-3B""": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings, avoiding the whitespace/control
    characters that the BPE code barfs on.
    """
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs( word ):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class a__ ( PreTrainedTokenizer ):
_a : str = VOCAB_FILES_NAMES
_a : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_a : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : Union[str, Any] = ["""input_ids""", """attention_mask"""]
def __init__( self , _A , _A , _A="replace" , _A="<s>" , _A="</s>" , _A="</s>" , _A="<s>" , _A="<unk>" , _A="<pad>" , _A="<mask>" , _A=False , **_A , ):
"""simple docstring"""
__lowerCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else bos_token
__lowerCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else eos_token
__lowerCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else sep_token
__lowerCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else cls_token
__lowerCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else unk_token
__lowerCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
__lowerCAmelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
super().__init__(
errors=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , **_A , )
with open(_A , encoding="utf-8" ) as vocab_handle:
__lowerCAmelCase = json.load(_A )
__lowerCAmelCase = {v: k for k, v in self.encoder.items()}
__lowerCAmelCase = errors # how to handle errors in decoding
__lowerCAmelCase = bytes_to_unicode()
__lowerCAmelCase = {v: k for k, v in self.byte_encoder.items()}
with open(_A , encoding="utf-8" ) as merges_handle:
__lowerCAmelCase = merges_handle.read().split("\n" )[1:-1]
__lowerCAmelCase = [tuple(merge.split() ) for merge in bpe_merges]
__lowerCAmelCase = dict(zip(_A , range(len(_A ) ) ) )
__lowerCAmelCase = {}
__lowerCAmelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__lowerCAmelCase = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return len(self.encoder )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
__lowerCAmelCase = tuple(_A )
__lowerCAmelCase = get_pairs(_A )
if not pairs:
return token
while True:
__lowerCAmelCase = min(_A , key=lambda _A : self.bpe_ranks.get(_A , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__lowerCAmelCase , __lowerCAmelCase = bigram
__lowerCAmelCase = []
__lowerCAmelCase = 0
while i < len(_A ):
try:
__lowerCAmelCase = word.index(_A , _A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowerCAmelCase = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowerCAmelCase = tuple(_A )
__lowerCAmelCase = new_word
if len(_A ) == 1:
break
else:
__lowerCAmelCase = get_pairs(_A )
__lowerCAmelCase = " ".join(_A )
__lowerCAmelCase = word
return word
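    # e.g. with toy merges such as ("Ġ", "l"), ("Ġl", "o"), ("Ġlo", "w") (illustrative), bpe("Ġlow")
    # merges the lowest-ranked pair first and collapses step by step to the single token "Ġlow".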
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
__lowerCAmelCase = []
for token in re.findall(self.pat , _A ):
__lowerCAmelCase = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A ).split(" " ) )
return bpe_tokens
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
return self.encoder.get(_A , self.encoder.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
return self.decoder.get(_A )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
__lowerCAmelCase = "".join(_A )
__lowerCAmelCase = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def __SCREAMING_SNAKE_CASE( self , _A , _A = None ):
"""simple docstring"""
if not os.path.isdir(_A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCAmelCase = os.path.join(
_A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__lowerCAmelCase = os.path.join(
_A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(_A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_A , ensure_ascii=_A ) + "\n" )
__lowerCAmelCase = 0
with open(_A , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
__lowerCAmelCase = token_index
writer.write(" ".join(_A ) + "\n" )
index += 1
return vocab_file, merge_file
def __SCREAMING_SNAKE_CASE( self , _A , _A = None , _A = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
def __SCREAMING_SNAKE_CASE( self , _A , _A = None ):
"""simple docstring"""
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __SCREAMING_SNAKE_CASE( self , _A , _A=False , **_A ):
"""simple docstring"""
__lowerCAmelCase = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_A ) > 0 and not text[0].isspace()):
__lowerCAmelCase = " " + text
return (text, kwargs)
def __SCREAMING_SNAKE_CASE( self , _A , _A = None ):
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
__lowerCAmelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(_A )
__lowerCAmelCase = " ".join(_A )
__lowerCAmelCase = self.encode(_A )
if len(_A ) > self.model_max_length:
__lowerCAmelCase = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
| 92 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")
class SegmentTree( Generic[T] ):
    def __init__( self , arr , fnc ) -> None:
        any_type = None
        self.N = len(arr )
        self.st = [any_type for _ in range(self.N )] + arr
        self.fn = fnc
        self.build()
    def build( self ) -> None:
        for p in range(self.N - 1 , 0 , -1 ):
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def update( self , p , v ) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def query( self , l , r ) -> T | None:  # noqa: E741
        l , r = l + self.N, r + self.N
        res = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res , self.st[l] )
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res , self.st[r] )
            l , r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 1_0, -2, 9, -3, 8, 4, -7, 5, 6, 1_1, -1_2]
    test_updates = {
0: 7,
1: 2,
2: 6,
3: -1_4,
4: 5,
5: 4,
6: 7,
7: -1_0,
8: 9,
9: 1_0,
1_0: 1_2,
1_1: 1,
}
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
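    # e.g. min_segment_tree.query(0, 2) == min(test_array[:3]) == -2 before any updates are applied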
    def test_all_segments() -> None:
        """Check every segment [i, j] against a plain reduce() over the same slice."""
        for i in range(len(test_array ) ):
            for j in range(i , len(test_array ) ):
                min_range = reduce(min , test_array[i : j + 1] )
                max_range = reduce(max , test_array[i : j + 1] )
                sum_range = reduce(lambda a , b : a + b , test_array[i : j + 1] )
                assert min_range == min_segment_tree.query(i , j )
                assert max_range == max_segment_tree.query(i , j )
                assert sum_range == sum_segment_tree.query(i , j )
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 168 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
snake_case : Dict = LEDTokenizer
snake_case : List[str] = LEDTokenizerFast
snake_case : Any = True
def snake_case_ (self ):
super().setUp()
_UpperCAmelCase : Optional[int] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_UpperCAmelCase : Optional[Any] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
_UpperCAmelCase : List[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_UpperCAmelCase : int = {"""unk_token""": """<unk>"""}
_UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_UpperCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCAmelCase__ ) )
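        # The tiny vocab and merges written above are just enough to tokenize the "lower newer" fixture below.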
def snake_case_ (self , **lowerCAmelCase__ ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def snake_case_ (self , **lowerCAmelCase__ ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def snake_case_ (self , lowerCAmelCase__ ):
return "lower newer", "lower newer"
@cached_property
def snake_case_ (self ):
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def snake_case_ (self ):
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def snake_case_ (self ):
_UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_UpperCAmelCase : Dict = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : List[Any] = tokenizer(lowerCAmelCase__ , max_length=len(lowerCAmelCase__ ) , padding=lowerCAmelCase__ , return_tensors="""pt""" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_UpperCAmelCase : str = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@require_torch
def snake_case_ (self ):
_UpperCAmelCase : Dict = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : Dict = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="""pt""" )
self.assertIn("""input_ids""" , lowerCAmelCase__ )
self.assertIn("""attention_mask""" , lowerCAmelCase__ )
self.assertNotIn("""labels""" , lowerCAmelCase__ )
self.assertNotIn("""decoder_attention_mask""" , lowerCAmelCase__ )
@require_torch
def snake_case_ (self ):
_UpperCAmelCase : Union[str, Any] = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : str = tokenizer(text_target=lowerCAmelCase__ , max_length=3_2 , padding="""max_length""" , return_tensors="""pt""" )
self.assertEqual(3_2 , targets["""input_ids"""].shape[1] )
@require_torch
def snake_case_ (self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : Union[str, Any] = tokenizer(
["""I am a small frog""" * 1_0_2_4, """I am a small frog"""] , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="""pt""" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2_2) )
@require_torch
def snake_case_ (self ):
_UpperCAmelCase : Union[str, Any] = ["""A long paragraph for summarization."""]
_UpperCAmelCase : int = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : Any = tokenizer(lowerCAmelCase__ , return_tensors="""pt""" )
_UpperCAmelCase : Optional[Any] = tokenizer(text_target=lowerCAmelCase__ , return_tensors="""pt""" )
_UpperCAmelCase : int = inputs["""input_ids"""]
_UpperCAmelCase : Union[str, Any] = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def snake_case_ (self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : Union[str, Any] = ["""Summary of the text.""", """Another summary."""]
_UpperCAmelCase : Dict = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_UpperCAmelCase : str = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ )
_UpperCAmelCase : str = [[0] * len(lowerCAmelCase__ ) for x in encoded_output["""input_ids"""]]
_UpperCAmelCase : Union[str, Any] = tokenizer.pad(lowerCAmelCase__ )
self.assertSequenceEqual(outputs["""global_attention_mask"""] , lowerCAmelCase__ )
def snake_case_ (self ):
pass
def snake_case_ (self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCAmelCase : str = """A, <mask> AllenNLP sentence."""
_UpperCAmelCase : Any = tokenizer_r.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = tokenizer_p.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
_UpperCAmelCase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
_UpperCAmelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 352 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __lowerCAmelCase ( DiffusionPipeline ):
def __init__(self , lowerCAmelCase__ , lowerCAmelCase__ ):
super().__init__()
# make sure scheduler can always be converted to DDIM
_UpperCAmelCase : Tuple = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
@torch.no_grad()
def __call__(self , lowerCAmelCase__ = 1 , lowerCAmelCase__ = None , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = 5_0 , lowerCAmelCase__ = None , lowerCAmelCase__ = "pil" , lowerCAmelCase__ = True , ):
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , lowerCAmelCase__ ):
_UpperCAmelCase : str = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
_UpperCAmelCase : int = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(lowerCAmelCase__ ) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(lowerCAmelCase__ )}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
_UpperCAmelCase : Optional[Any] = randn_tensor(lowerCAmelCase__ , generator=lowerCAmelCase__ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(lowerCAmelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
_UpperCAmelCase : str = self.unet(lowerCAmelCase__ , lowerCAmelCase__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_UpperCAmelCase : List[str] = self.scheduler.step(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , eta=lowerCAmelCase__ , use_clipped_model_output=lowerCAmelCase__ , generator=lowerCAmelCase__ ).prev_sample
_UpperCAmelCase : Optional[int] = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase : str = self.numpy_to_pil(lowerCAmelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase__ )
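# Usage sketch (illustrative; class and argument names in this file are the dataset's obfuscated ones):
#   pipe = <this pipeline class>(unet=unet, scheduler=DDIMScheduler())
#   images = pipe(1, None, 0.0, 50).images  # batch_size, generator, eta, num_inference_steps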
| 170 | 0 |
def is_pangram( input_str: str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" " , "" )
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower() )
    return len(frequency ) == 26
def is_pangram_faster( input_str: str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char ) - 97] = True
        elif char.isupper():
            flag[ord(char ) - 65] = True
    return all(flag )
def is_pangram_fastest( input_str: str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def benchmark() -> None:
    """Benchmark the three implementations above with timeit."""
    from timeit import timeit
    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()" , setup=setup ) )
    print(timeit("is_pangram_faster()" , setup=setup ) )
    print(timeit("is_pangram_fastest()" , setup=setup ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
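# e.g. is_pangram("abcdefghijklmnopqrstuvwxyz") is True, while is_pangram("hello world") is False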
if __name__ == "__main__":
import doctest
doctest.testmod()
    benchmark()
| 189 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class lowerCamelCase_ ( PretrainedConfig ):
'''simple docstring'''
lowercase_ = "speech_to_text"
lowercase_ = ["past_key_values"]
lowercase_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Tuple , _lowerCAmelCase : List[Any]=10_000 , _lowerCAmelCase : List[Any]=12 , _lowerCAmelCase : Union[str, Any]=2_048 , _lowerCAmelCase : Optional[int]=4 , _lowerCAmelCase : Union[str, Any]=6 , _lowerCAmelCase : Optional[int]=2_048 , _lowerCAmelCase : Optional[Any]=4 , _lowerCAmelCase : Any=0.0 , _lowerCAmelCase : Optional[Any]=0.0 , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : int="relu" , _lowerCAmelCase : Union[str, Any]=256 , _lowerCAmelCase : List[str]=0.1 , _lowerCAmelCase : str=0.0 , _lowerCAmelCase : Tuple=0.0 , _lowerCAmelCase : List[str]=0.02 , _lowerCAmelCase : List[str]=2 , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : List[str]=1 , _lowerCAmelCase : str=0 , _lowerCAmelCase : str=2 , _lowerCAmelCase : Union[str, Any]=6_000 , _lowerCAmelCase : List[str]=1_024 , _lowerCAmelCase : str=2 , _lowerCAmelCase : Optional[Any]=(5, 5) , _lowerCAmelCase : str=1_024 , _lowerCAmelCase : str=80 , _lowerCAmelCase : Tuple=1 , **_lowerCAmelCase : Any , ):
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = d_model
SCREAMING_SNAKE_CASE_ = encoder_ffn_dim
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = encoder_attention_heads
SCREAMING_SNAKE_CASE_ = decoder_ffn_dim
SCREAMING_SNAKE_CASE_ = decoder_layers
SCREAMING_SNAKE_CASE_ = decoder_attention_heads
SCREAMING_SNAKE_CASE_ = dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = activation_dropout
SCREAMING_SNAKE_CASE_ = activation_function
SCREAMING_SNAKE_CASE_ = init_std
SCREAMING_SNAKE_CASE_ = encoder_layerdrop
SCREAMING_SNAKE_CASE_ = decoder_layerdrop
SCREAMING_SNAKE_CASE_ = use_cache
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE_ = max_source_positions
SCREAMING_SNAKE_CASE_ = max_target_positions
SCREAMING_SNAKE_CASE_ = num_conv_layers
SCREAMING_SNAKE_CASE_ = list(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = conv_channels
SCREAMING_SNAKE_CASE_ = input_feat_per_channel
SCREAMING_SNAKE_CASE_ = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
F"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, "
F"`config.num_conv_layers = {self.num_conv_layers}`." )
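        # The defaults above give num_conv_layers == 2 and conv_kernel_sizes == (5, 5), so this check passes.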
super().__init__(
            pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
| 225 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
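    # Attribute access such as `RobertaModel` now resolves lazily, so the heavy torch/tf/flax
    # imports above only happen when the corresponding symbol is first requested.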
| 293 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None):
    """Create a random nested list of floats with the given 2-D shape."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int , A : List[str] , A : List[Any]=7 , A : Optional[int]=400 , A : List[Any]=2_000 , A : Dict=2_048 , A : Tuple=128 , A : List[Any]=1 , A : Tuple=512 , A : str=30 , A : Optional[Any]=44_100 , ):
__snake_case: Dict = parent
__snake_case: Optional[Any] = batch_size
__snake_case: Optional[int] = min_seq_length
__snake_case: Optional[Any] = max_seq_length
__snake_case: List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__snake_case: Any = spectrogram_length
__snake_case: Any = feature_size
__snake_case: Union[str, Any] = num_audio_channels
__snake_case: Any = hop_length
__snake_case: List[str] = chunk_length
__snake_case: Any = sampling_rate
def UpperCAmelCase__ ( self : List[Any] ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def UpperCAmelCase__ ( self : List[str] , A : str=False , A : int=False ):
def _flatten(A : Dict ):
return list(itertools.chain(*A ) )
if equal_length:
__snake_case: List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__snake_case: int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__snake_case: Tuple = [np.asarray(A ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __snake_case ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = TvltFeatureExtractor
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: str = TvltFeatureExtractionTester(self )
def UpperCAmelCase__ ( self : int ):
__snake_case: Tuple = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(A , """spectrogram_length""" ) )
self.assertTrue(hasattr(A , """feature_size""" ) )
self.assertTrue(hasattr(A , """num_audio_channels""" ) )
self.assertTrue(hasattr(A , """hop_length""" ) )
self.assertTrue(hasattr(A , """chunk_length""" ) )
self.assertTrue(hasattr(A , """sampling_rate""" ) )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case: Tuple = feat_extract_first.save_pretrained(A )[0]
check_json_file_has_correct_format(A )
__snake_case: int = self.feature_extraction_class.from_pretrained(A )
__snake_case: List[str] = feat_extract_first.to_dict()
__snake_case: str = feat_extract_second.to_dict()
__snake_case: List[Any] = dict_first.pop("""mel_filters""" )
__snake_case: str = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(A , A ) )
self.assertEqual(A , A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case: str = os.path.join(A , """feat_extract.json""" )
feat_extract_first.to_json_file(A )
__snake_case: List[Any] = self.feature_extraction_class.from_json_file(A )
__snake_case: Dict = feat_extract_first.to_dict()
__snake_case: Any = feat_extract_second.to_dict()
__snake_case: int = dict_first.pop("""mel_filters""" )
__snake_case: int = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(A , A ) )
self.assertEqual(A , A )
def UpperCAmelCase__ ( self : Any ):
# Initialize feature_extractor
__snake_case: Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
__snake_case: Dict = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__snake_case: str = [np.asarray(A ) for speech_input in speech_inputs]
# Test not batched input
__snake_case: int = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
__snake_case: Optional[int] = feature_extractor(A , return_tensors="""np""" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
__snake_case: Union[str, Any] = feature_extractor(
A , return_tensors="""np""" , sampling_rate=44_100 , mask_audio=A ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
__snake_case: Any = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__snake_case: Union[str, Any] = np.asarray(A )
__snake_case: List[Any] = feature_extractor(A , return_tensors="""np""" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def UpperCAmelCase__ ( self : Union[str, Any] , A : List[str] ):
__snake_case: Tuple = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
__snake_case: List[Any] = ds.sort("""id""" ).select(range(A ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Dict = self._load_datasamples(1 )
__snake_case: Optional[int] = TvltFeatureExtractor()
__snake_case: Optional[Any] = feature_extractor(A , return_tensors="""pt""" ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 192, 128) )
__snake_case: str = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , A , atol=1E-4 ) )
| 293 | 1 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str , map_location: str = "cpu" , save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict (e.g. pytorch_model.bin) to torch.float16, in place or at save_path."""
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin' )
        state_dict[k] = v.half()
    if save_path is None: # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
if __name__ == "__main__":
fire.Fire(convert)
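    # Example invocation via python-fire (script name is illustrative):
    #   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin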
| 332 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
_lowercase : Dict = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class _UpperCAmelCase ( PretrainedConfig ):
a__ : Tuple = "swinv2"
a__ : List[Any] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Any , _lowercase : List[Any]=2_24 , _lowercase : int=4 , _lowercase : Optional[int]=3 , _lowercase : Optional[Any]=96 , _lowercase : Optional[int]=[2, 2, 6, 2] , _lowercase : Optional[int]=[3, 6, 12, 24] , _lowercase : str=7 , _lowercase : Union[str, Any]=4.0 , _lowercase : List[str]=True , _lowercase : List[Any]=0.0 , _lowercase : Dict=0.0 , _lowercase : List[Any]=0.1 , _lowercase : Union[str, Any]="gelu" , _lowercase : Tuple=False , _lowercase : Optional[int]=0.02 , _lowercase : List[Any]=1E-5 , _lowercase : Tuple=32 , **_lowercase : Optional[int] , ):
super().__init__(**_lowercase )
__UpperCAmelCase = image_size
__UpperCAmelCase = patch_size
__UpperCAmelCase = num_channels
__UpperCAmelCase = embed_dim
__UpperCAmelCase = depths
__UpperCAmelCase = len(_lowercase )
__UpperCAmelCase = num_heads
__UpperCAmelCase = window_size
__UpperCAmelCase = mlp_ratio
__UpperCAmelCase = qkv_bias
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = drop_path_rate
__UpperCAmelCase = hidden_act
__UpperCAmelCase = use_absolute_embeddings
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = initializer_range
__UpperCAmelCase = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__UpperCAmelCase = int(embed_dim * 2 ** (len(_lowercase ) - 1) )
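        # e.g. the defaults (embed_dim=96, len(depths) == 4) give hidden_size == 96 * 2**3 == 768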
__UpperCAmelCase = (0, 0, 0, 0)
| 332 | 1 |
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class _lowerCamelCase :
'''simple docstring'''
def __init__( self : List[Any] , _A : Tuple = None ) -> Dict:
__magic_name__ : Any = (
os.path.join(SCREAMING_SNAKE_CASE_ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
__magic_name__ : List[Any] = Extractor
def __lowerCAmelCase ( self : str , _A : List[str] ) -> Union[str, Any]:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
__magic_name__ : Tuple = os.path.abspath(SCREAMING_SNAKE_CASE_ )
return os.path.join(self.extract_dir , hash_url_to_filename(SCREAMING_SNAKE_CASE_ ) )
def __lowerCAmelCase ( self : Union[str, Any] , _A : Optional[int] , _A : List[Any] ) -> Any:
return force_extract or (
not os.path.isfile(SCREAMING_SNAKE_CASE_ ) and not (os.path.isdir(SCREAMING_SNAKE_CASE_ ) and os.listdir(SCREAMING_SNAKE_CASE_ ))
)
def __lowerCAmelCase ( self : Union[str, Any] , _A : Any , _A : Optional[int] = False ) -> List[Any]:
__magic_name__ : Any = self.extractor.infer_extractor_format(SCREAMING_SNAKE_CASE_ )
if not extractor_format:
return input_path
__magic_name__ : Any = self._get_output_path(SCREAMING_SNAKE_CASE_ )
if self._do_extract(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.extractor.extract(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return output_path
class _lowerCamelCase ( _UpperCAmelCase ):
'''simple docstring'''
@classmethod
@abstractmethod
def __lowerCAmelCase ( cls : Optional[int] , _A : List[str] , **_A : Optional[int] ) -> List[str]:
...
@staticmethod
@abstractmethod
def __lowerCAmelCase ( _A : Optional[int] , _A : List[Any] ) -> Union[str, Any]:
...
class _lowerCamelCase ( _UpperCAmelCase , _UpperCAmelCase ):
'''simple docstring'''
A_ : List[bytes] = []
@staticmethod
def __lowerCAmelCase ( _A : Any , _A : Optional[Any] ) -> Tuple:
with open(SCREAMING_SNAKE_CASE_ , 'rb' ) as f:
return f.read(SCREAMING_SNAKE_CASE_ )
@classmethod
def __lowerCAmelCase ( cls : str , _A : str , _A : List[Any] = b"" ) -> Union[str, Any]:
if not magic_number:
__magic_name__ : Tuple = max(len(SCREAMING_SNAKE_CASE_ ) for cls_magic_number in cls.magic_numbers )
try:
__magic_name__ : List[Any] = cls.read_magic_number(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except OSError:
return False
return any(magic_number.startswith(SCREAMING_SNAKE_CASE_ ) for cls_magic_number in cls.magic_numbers )
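        # e.g. a gzip stream starts with b"\x1F\x8B", so the gzip extractor's magic number below matches it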
class _lowerCamelCase ( _UpperCAmelCase ):
'''simple docstring'''
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , _A : Union[str, Any] , **_A : List[Any] ) -> Optional[int]:
return tarfile.is_tarfile(SCREAMING_SNAKE_CASE_ )
@staticmethod
def __lowerCAmelCase ( _A : List[str] , _A : Union[str, Any] ) -> str:
def resolved(_A : Tuple ) -> str:
return os.path.realpath(os.path.abspath(SCREAMING_SNAKE_CASE_ ) )
def badpath(_A : Dict , _A : List[str] ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ).startswith(SCREAMING_SNAKE_CASE_ )
def badlink(_A : Optional[int] , _A : Dict ) -> bool:
# Links are interpreted relative to the directory containing the link
__magic_name__ : Any = resolved(os.path.join(SCREAMING_SNAKE_CASE_ , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=SCREAMING_SNAKE_CASE_ )
__magic_name__ : List[str] = resolved(SCREAMING_SNAKE_CASE_ )
for finfo in members:
if badpath(finfo.name , SCREAMING_SNAKE_CASE_ ):
logger.error(F'Extraction of {finfo.name} is blocked (illegal path)' )
elif finfo.issym() and badlink(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
logger.error(F'Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}' )
elif finfo.islnk() and badlink(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
logger.error(F'Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}' )
else:
yield finfo
@staticmethod
def __lowerCAmelCase ( _A : Optional[Any] , _A : Union[str, Any] ) -> List[str]:
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
__magic_name__ : List[Any] = tarfile.open(SCREAMING_SNAKE_CASE_ )
tar_file.extractall(SCREAMING_SNAKE_CASE_ , members=TarExtractor.safemembers(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
tar_file.close()
class _lowerCamelCase ( _UpperCAmelCase ):
'''simple docstring'''
A_ : List[Any] = [b'\x1F\x8B']
@staticmethod
def __lowerCAmelCase ( _A : int , _A : Any ) -> Union[str, Any]:
with gzip.open(SCREAMING_SNAKE_CASE_ , 'rb' ) as gzip_file:
with open(SCREAMING_SNAKE_CASE_ , 'wb' ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
class _lowerCamelCase ( _UpperCAmelCase ):
'''simple docstring'''
A_ : Tuple = [
b'PK\x03\x04',
b'PK\x05\x06', # empty archive
b'PK\x07\x08', # spanned archive
]
@classmethod
def __lowerCAmelCase ( cls : Union[str, Any] , _A : List[str] , _A : List[str] = b"" ) -> List[str]:
if super().is_extractable(SCREAMING_SNAKE_CASE_ , magic_number=SCREAMING_SNAKE_CASE_ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(SCREAMING_SNAKE_CASE_ , 'rb' ) as fp:
__magic_name__ : Tuple = _EndRecData(SCREAMING_SNAKE_CASE_ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
__magic_name__ : Tuple = fp.read(SCREAMING_SNAKE_CASE_ ) # CD is where we expect it to be
if len(SCREAMING_SNAKE_CASE_ ) == sizeCentralDir:
__magic_name__ : int = struct.unpack(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def __lowerCAmelCase ( _A : Union[str, Any] , _A : List[Any] ) -> Dict:
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ , 'r' ) as zip_file:
zip_file.extractall(SCREAMING_SNAKE_CASE_ )
zip_file.close()
class _lowerCamelCase ( _UpperCAmelCase ):
'''simple docstring'''
A_ : Optional[Any] = [b'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def __lowerCAmelCase ( _A : List[str] , _A : int ) -> Tuple:
with lzma.open(SCREAMING_SNAKE_CASE_ ) as compressed_file:
with open(SCREAMING_SNAKE_CASE_ , 'wb' ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
class _lowerCamelCase ( _UpperCAmelCase ):
'''simple docstring'''
A_ : List[str] = [b'Rar!\x1a\x07\x00', b'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
@staticmethod
def __lowerCAmelCase ( _A : List[str] , _A : Optional[int] ) -> Union[str, Any]:
if not config.RARFILE_AVAILABLE:
raise ImportError('Please pip install rarfile' )
import rarfile
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
__magic_name__ : Dict = rarfile.RarFile(SCREAMING_SNAKE_CASE_ )
rf.extractall(SCREAMING_SNAKE_CASE_ )
rf.close()
class _lowerCamelCase ( _UpperCAmelCase ):
'''simple docstring'''
A_ : List[str] = [b'\x28\xb5\x2F\xFD']
@staticmethod
def __lowerCAmelCase ( _A : List[Any] , _A : List[Any] ) -> Any:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('Please pip install zstandard' )
import zstandard as zstd
__magic_name__ : List[str] = zstd.ZstdDecompressor()
with open(SCREAMING_SNAKE_CASE_ , 'rb' ) as ifh, open(SCREAMING_SNAKE_CASE_ , 'wb' ) as ofh:
dctx.copy_stream(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
class _lowerCamelCase ( _UpperCAmelCase ):
'''simple docstring'''
A_ : Any = [b'\x42\x5A\x68']
@staticmethod
def __lowerCAmelCase ( _A : List[str] , _A : int ) -> List[Any]:
        with bz2.open(SCREAMING_SNAKE_CASE_ , 'rb' ) as compressed_file:
with open(SCREAMING_SNAKE_CASE_ , 'wb' ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
class _lowerCamelCase ( _UpperCAmelCase ):
'''simple docstring'''
A_ : str = [b'\x37\x7A\xBC\xAF\x27\x1C']
@staticmethod
def __lowerCAmelCase ( _A : Optional[Any] , _A : Optional[Any] ) -> Optional[Any]:
if not config.PY7ZR_AVAILABLE:
raise ImportError('Please pip install py7zr' )
        import py7zr
        os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
        with py7zr.SevenZipFile(SCREAMING_SNAKE_CASE_ , 'r' ) as archive:
archive.extractall(SCREAMING_SNAKE_CASE_ )
class _lowerCamelCase ( _UpperCAmelCase ):
'''simple docstring'''
A_ : Dict = [b'\x04\x22\x4D\x18']
@staticmethod
def __lowerCAmelCase ( _A : int , _A : int ) -> Tuple:
if not config.LZ4_AVAILABLE:
raise ImportError('Please pip install lz4' )
        import lz4.frame
        with lz4.frame.open(SCREAMING_SNAKE_CASE_ , 'rb' ) as compressed_file:
with open(SCREAMING_SNAKE_CASE_ , 'wb' ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
class _lowerCamelCase :
'''simple docstring'''
A_ : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def __lowerCAmelCase ( cls : Any ) -> Union[str, Any]:
return max(
len(SCREAMING_SNAKE_CASE_ )
for extractor in cls.extractors.values()
if issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def __lowerCAmelCase ( _A : Union[str, Any] , _A : Any ) -> str:
try:
return MagicNumberBaseExtractor.read_magic_number(SCREAMING_SNAKE_CASE_ , magic_number_length=SCREAMING_SNAKE_CASE_ )
except OSError:
return b""
@classmethod
def __lowerCAmelCase ( cls : Dict , _A : Optional[int] , _A : int = False ) -> Tuple:
warnings.warn(
'Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
'Use \'infer_extractor_format\' instead.' , category=SCREAMING_SNAKE_CASE_ , )
__magic_name__ : Tuple = cls.infer_extractor_format(SCREAMING_SNAKE_CASE_ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def __lowerCAmelCase ( cls : Tuple , _A : Tuple ) -> Dict: # <Added version="2.4.0"/>
__magic_name__ : Union[str, Any] = cls._get_magic_number_max_length()
__magic_name__ : str = cls._read_magic_number(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(SCREAMING_SNAKE_CASE_ , magic_number=SCREAMING_SNAKE_CASE_ ):
return extractor_format
@classmethod
def __lowerCAmelCase ( cls : str , _A : Union[str, Any] , _A : int , _A : Dict = None , _A : Optional[Any] = "deprecated" , ) -> Optional[Any]:
os.makedirs(os.path.dirname(SCREAMING_SNAKE_CASE_ ) , exist_ok=SCREAMING_SNAKE_CASE_ )
# Prevent parallel extractions
__magic_name__ : List[Any] = str(Path(SCREAMING_SNAKE_CASE_ ).with_suffix('.lock' ) )
with FileLock(SCREAMING_SNAKE_CASE_ ):
shutil.rmtree(SCREAMING_SNAKE_CASE_ , ignore_errors=SCREAMING_SNAKE_CASE_ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): # passed as positional arg
warnings.warn(
'Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
'Use \'extractor_format\' instead.' , category=SCREAMING_SNAKE_CASE_ , )
__magic_name__ : Union[str, Any] = extractor if extractor != """deprecated""" else extractor_format
else:
__magic_name__ : str = cls.extractors[extractor_format]
return extractor.extract(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
warnings.warn(
'Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '
'exception in 3.0.0.' , category=SCREAMING_SNAKE_CASE_ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(SCREAMING_SNAKE_CASE_ ):
                    return extractor.extract(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 355 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vision_encoder_decoder'''] = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vision_encoder_decoder'''] = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_vision_encoder_decoder'''] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 264 |
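# --- A self-contained, hedged sketch of the lazy-import pattern the module above
# relies on: the heavy submodule is only imported when one of its attributes is
# first accessed, so importing the parent package stays cheap. `LazyModuleProxy`
# is an illustrative name, not the _LazyModule implementation itself.
import importlib


class LazyModuleProxy:
    def __init__(self, module_name):
        self._module_name = module_name
        self._module = None

    def __getattr__(self, name):
        if self._module is None:  # the real import is deferred until first use
            self._module = importlib.import_module(self._module_name)
        return getattr(self._module, name)


lazy_json = LazyModuleProxy("json")     # no import has happened yet
print(lazy_json.dumps({"lazy": True}))  # the import happens on this line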
"""simple docstring"""
import datasets
_CITATION = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
_DESCRIPTION = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
_KWARGS_DESCRIPTION = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy(preds , labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info( self ):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
                    '''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
                } ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )

    def _compute( self , predictions , references ):
        return {"accuracy": simple_accuracy(predictions , references )}
| 264 |
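# --- Hedged usage sketch for the `simple_accuracy` helper above, using plain
# NumPy arrays instead of the full `datasets` metric machinery:
import numpy as np

preds = np.array([0, 1, 1, 0])
labels = np.array([0, 1, 0, 0])
print((preds == labels).mean())  # 0.75 -- three of four predictions match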
"""simple docstring"""
import numpy as np
def tanh_activation(vector):
    # hyperbolic tangent via the logistic identity: tanh(x) = 2 * sigmoid(2x) - 1
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 264 | 1 |
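# --- Hedged sanity check that the sigmoid-based identity above agrees with
# NumPy's built-in tanh:
import numpy as np

x = np.array([-1.0, 0.0, 1.0])
print(np.allclose((2 / (1 + np.exp(-2 * x))) - 1, np.tanh(x)))  # True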
"""simple docstring"""
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img, pt1, pt2, rows, cols):
    rot_matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, rot_matrix, (rows, cols))
if __name__ == "__main__":
# read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 255 |
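# --- Hedged sketch of the cv2.getAffineTransform call used above: three source
# points and three destination points fully determine a 2x3 affine matrix.
# Assumes opencv-python is installed; the points below encode a 90-degree
# counter-clockwise rotation about the origin, (x, y) -> (-y, x).
import numpy as np
import cv2

src = np.array([[0, 0], [1, 0], [0, 1]], np.float32)
dst = np.array([[0, 0], [0, 1], [-1, 0]], np.float32)
print(cv2.getAffineTransform(src, dst))  # approximately [[0, -1, 0], [1, 0, 0]]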
"""simple docstring"""
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3170_4406_4679_8873_8596_1981 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
    # entries of 1 are placeholders (n < 1 is never true), kept so the indexing
    # below stays aligned with the witness list
    bounds = [
2047,
137_3653,
2532_6001,
32_1503_1751,
2_1523_0289_8747,
3_4747_4966_0383,
341_5500_7172_8321,
1,
382_5123_0565_4641_3051,
1,
1,
3186_6585_7834_0311_5116_7461,
3_3170_4406_4679_8873_8596_1981,
]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds , 1 ):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
        pr = False
        for r in range(s ):
            m = pow(prime , d * 2**r , n )
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def test_miller_rabin():
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(83_8201 )
assert miller_rabin(83_8207 )
# 1_373_653
assert not miller_rabin(1731_6001 )
assert miller_rabin(1731_6017 )
# 25_326_001
assert not miller_rabin(30_7838_6641 )
assert miller_rabin(30_7838_6653 )
# 3_215_031_751
assert not miller_rabin(1_7130_4557_4801 )
assert miller_rabin(1_7130_4557_4819 )
# 2_152_302_898_747
assert not miller_rabin(2_7797_9972_8307 )
assert miller_rabin(2_7797_9972_8327 )
# 3_474_749_660_383
assert not miller_rabin(113_8500_2390_9441 )
assert miller_rabin(113_8500_2390_9527 )
# 341_550_071_728_321
assert not miller_rabin(127_5041_0188_4880_4351 )
assert miller_rabin(127_5041_0188_4880_4391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(796_6646_4458_5077_8779_1867 )
assert miller_rabin(796_6646_4458_5077_8779_1951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5528_4067_7446_6478_9766_0333 )
assert miller_rabin(5528_4067_7446_6478_9766_0359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 255 | 1 |
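# --- Hedged usage sketch for miller_rabin above:
print(miller_rabin(97))       # True: 97 is prime
print(miller_rabin(97 * 89))  # False: an obvious composite
print(miller_rabin(561))      # False: the Carmichael number 561 is rejected too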
"""simple docstring"""
import operator as op
def solve(post_fix):
    '''simple docstring'''
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8 ) , "Action".center(12 ) , "Stack" , sep=" | " )
    print("-" * (30 + len(post_fix )) )

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x )  # append x to stack
            # output in tabular format
            print(x.rjust(8 ) , ("push(" + x + ")").ljust(12 ) , ",".join(stack ) , sep=" | " )
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8 ) , ("pop(" + b + ")").ljust(12 ) , ",".join(stack ) , sep=" | " )

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8 ) , ("pop(" + a + ")").ljust(12 ) , ",".join(stack ) , sep=" | " )

            stack.append(
                str(opr[x](int(a ) , int(b ) ) ) )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8 ) , ("push(" + a + x + b + ")").ljust(12 ) , ",".join(stack ) , sep=" | " , )

    return int(stack[0] )


if __name__ == "__main__":
    Postfix = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """)
    print("""\n\tResult = """, solve(Postfix))
| 40 |
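# --- Worked example (hedged) for the postfix evaluator above:
# "3 4 + 2 *" evaluates as (3 + 4) * 2 = 14.
print(solve("3 4 + 2 *".split(" ")))  # prints the trace table, then 14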
"""simple docstring"""
import datasets
_a = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
_a = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
_a = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def simple_accuracy(preds, labels):
    """simple docstring"""
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Xnli( datasets.Metric ):
    def _info( self ):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32'''),
                    '''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32'''),
                }) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )

    def _compute( self , predictions , references ):
        '''simple docstring'''
        return {"accuracy": simple_accuracy(predictions , references)}
| 194 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
MAPPING_ENCODER = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
MAPPING_ENCODER_48K = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
MAPPING_DECODER = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
MAPPING_DECODER_48K = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )

    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            F' {value.shape} for {full_name}' )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(F'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith('.*' ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix , suffix = key.split('.*.' )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(F'Unsupported model: {model_name}' )

    for name, value in orig_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(F'{name} was ignored' )
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix , suffix = key.split('.*.' )
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith('embed' ) and name.endswith('embed_avg' ):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key )[0].split('.' )[-2]
                    mapped_key = mapped_key.replace('*' , layer_index )
                if "weight_g" in name:
                    weight_type = 'weight_g'
                elif "weight_v" in name:
                    weight_type = 'weight_v'
                elif "weight_ih_l0" in name:
                    weight_type = 'weight_ih_l0'
                elif "weight_hh_l0" in name:
                    weight_type = 'weight_hh_l0'
                elif "bias_ih_l0" in name:
                    weight_type = 'bias_ih_l0'
                elif "bias_hh_l0" in name:
                    weight_type = 'bias_hh_l0'
                elif "weight_ih_l1" in name:
                    weight_type = 'weight_ih_l1'
                elif "weight_hh_l1" in name:
                    weight_type = 'weight_hh_l1'
                elif "bias_ih_l1" in name:
                    weight_type = 'bias_ih_l1'
                elif "bias_hh_l1" in name:
                    weight_type = 'bias_hh_l1'
                elif "bias" in name:
                    weight_type = 'bias'
                elif "weight" in name:
                    weight_type = 'weight'
                elif "running_mean" in name:
                    weight_type = 'running_mean'
                elif "running_var" in name:
                    weight_type = 'running_var'
                elif "num_batches_tracked" in name:
                    weight_type = 'num_batches_tracked'
                else:
                    weight_type = None
                set_recursively(hf_model , mapped_key , value , name , weight_type )
            continue
        if not is_used:
            unused_weights.append(name )

    logger.warning(F'Unused weights: {unused_weights}' )
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path )
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2_048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = 'time_group_norm'
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(F'Unknown model name: {model_name}' )

    model = EncodecModel(config )

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )

    original_checkpoint = torch.load(checkpoint_path )
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint['best_state']
    recursively_load_weights(original_checkpoint , model , model_name )
    model.save_pretrained(pytorch_dump_folder_path )

    if repo_id:
        print('Pushing to the hub...' )
        feature_extractor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 160 |
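# --- A small, hedged illustration (simplified relative to the script above) of
# the wildcard key mapping used by recursively_load_weights: "*" stands for a
# layer index that is captured from the original key and substituted back into
# the Hugging Face-side key.
orig_key = "quantizer.vq.layers.3._codebook.embed"
pattern = "quantizer.vq.layers.*._codebook.embed"
mapped = "quantizer.layers.*.codebook.embed"

prefix, suffix = pattern.split(".*.")
if orig_key.startswith(prefix) and orig_key.endswith(suffix):
    layer_index = orig_key[len(prefix) + 1 :].split(".")[0]
    print(mapped.replace("*", layer_index))  # quantizer.layers.3.codebook.embed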
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    def __init__( self , text = None , conversation_id = None , past_user_inputs=None , generated_responses=None ):
        """simple docstring"""
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__( self , other ):
        """simple docstring"""
        if not isinstance(other , Conversation ):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input( self , text , overwrite = False ):
        """simple docstring"""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    F'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    F'with: "{text}".' )
                self.new_user_input = text
            else:
                logger.warning(
                    F'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    F'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
        else:
            self.new_user_input = text

    def mark_processed( self ):
        """simple docstring"""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input )
        self.new_user_input = None

    def append_response( self , response ):
        """simple docstring"""
        self.generated_responses.append(response )

    def iter_texts( self ):
        """simple docstring"""
        for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__( self ):
        """simple docstring"""
        output = F'Conversation id: {self.uuid} \n'
        for is_user, text in self.iter_texts():
            name = 'user' if is_user else 'bot'
            output += F'{name} >> {text} \n'
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """ , )
class ConversationalPipeline( Pipeline ):
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters( self , min_length_for_response=None , minimum_tokens=None , clean_up_tokenization_spaces=None , **generate_kwargs ):
        """simple docstring"""
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params['min_length_for_response'] = min_length_for_response
        if minimum_tokens is not None:
            forward_params['minimum_tokens'] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params['max_length'] = generate_kwargs['max_length']
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params['clean_up_tokenization_spaces'] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs )
        return preprocess_params, forward_params, postprocess_params

    def __call__( self , conversations , num_workers=0 , **kwargs ):
        """simple docstring"""
        outputs = super().__call__(conversations , num_workers=num_workers , **kwargs )
        if isinstance(conversations , list ) and len(conversations ) == 1:
            return outputs[0]
        return outputs

    def preprocess( self , conversation , min_length_for_response=32 ):
        """simple docstring"""
        if not isinstance(conversation , Conversation ):
            raise ValueError('ConversationalPipeline, expects Conversation as inputs' )
        if conversation.new_user_input is None:
            raise ValueError(
                F'Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '
                'Add user inputs with the conversation\'s `add_user_input` method' )
        if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation )
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation )
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids] )
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids] )
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward( self , model_inputs , minimum_tokens=10 , **generate_kwargs ):
        """simple docstring"""
        max_length = generate_kwargs.get('max_length' , self.model.config.max_length )

        n = model_inputs['input_ids'].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(F'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
            trim = max_length - minimum_tokens
            model_inputs['input_ids'] = model_inputs['input_ids'][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs['attention_mask'] = model_inputs['attention_mask'][:, -trim:]
        conversation = model_inputs.pop('conversation' )
        generate_kwargs['max_length'] = max_length
        output_ids = self.model.generate(**model_inputs , **generate_kwargs )
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess( self , model_outputs , clean_up_tokenization_spaces=True ):
        """simple docstring"""
        output_ids = model_outputs['output_ids']
        answer = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
        conversation = model_outputs['conversation']
        conversation.mark_processed()
        conversation.append_response(answer )
        return conversation

    def _legacy_parse_and_tokenize( self , conversation ):
        """simple docstring"""
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) + [eos_token_id] )
            else:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) )
        if len(input_ids ) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 160 | 1 |
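# --- Hedged usage sketch for the Conversation container restored above; it
# needs no model, only the class itself:
conv = Conversation("Hi there!")
conv.mark_processed()                            # move the input into past_user_inputs
conv.append_response("Hello! How can I help?")
conv.add_user_input("What's the weather like?")
print(conv)                                      # "user >>" / "bot >>" transcript lines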
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class A_ ( lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = ort.SessionOptions()
lowercase = False
return options
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
lowercase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
lowercase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting' , revision='onnx' , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case__ )
lowercase = '''A red cat sitting on a park bench'''
lowercase = np.random.RandomState(0 )
lowercase = pipe(
prompt=snake_case__ , image=snake_case__ , mask_image=snake_case__ , guidance_scale=7.5 , num_inference_steps=10 , generator=snake_case__ , output_type='np' , )
lowercase = output.images
lowercase = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
lowercase = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
lowercase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
lowercase = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-inpainting' , subfolder='scheduler' , revision='onnx' )
lowercase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting' , revision='onnx' , scheduler=snake_case__ , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case__ )
lowercase = '''A red cat sitting on a park bench'''
lowercase = np.random.RandomState(0 )
lowercase = pipe(
prompt=snake_case__ , image=snake_case__ , mask_image=snake_case__ , guidance_scale=7.5 , num_inference_steps=20 , generator=snake_case__ , output_type='np' , )
lowercase = output.images
lowercase = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
lowercase = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 195 | from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case :
def __init__( self , snake_case__ , snake_case__=12 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=32 , snake_case__=2 , snake_case__=4 , snake_case__=37 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=0.02 , snake_case__=0 , snake_case__=None , ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : List[Any] =parent
UpperCAmelCase : Optional[int] =batch_size
UpperCAmelCase : List[Any] =seq_length
UpperCAmelCase : Optional[int] =is_training
UpperCAmelCase : Union[str, Any] =use_input_mask
UpperCAmelCase : Tuple =use_labels
UpperCAmelCase : Union[str, Any] =vocab_size
UpperCAmelCase : Tuple =hidden_size
UpperCAmelCase : Dict =projection_dim
UpperCAmelCase : Optional[int] =num_hidden_layers
UpperCAmelCase : Dict =num_attention_heads
UpperCAmelCase : int =intermediate_size
UpperCAmelCase : Any =dropout
UpperCAmelCase : Union[str, Any] =attention_dropout
UpperCAmelCase : Union[str, Any] =max_position_embeddings
UpperCAmelCase : List[str] =initializer_range
UpperCAmelCase : str =scope
UpperCAmelCase : str =bos_token_id
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : int =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : int =None
if self.use_input_mask:
UpperCAmelCase : Union[str, Any] =random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
UpperCAmelCase : Optional[int] =input_mask.numpy()
UpperCAmelCase , UpperCAmelCase : List[Any] =input_mask.shape
UpperCAmelCase : Optional[Any] =np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(snake_case__ ):
UpperCAmelCase : List[Any] =1
UpperCAmelCase : Tuple =0
UpperCAmelCase : List[Any] =self.get_config()
return config, input_ids, tf.convert_to_tensor(snake_case__ )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Tuple =TFBlipTextModel(config=snake_case__ )
UpperCAmelCase : List[Any] =model(snake_case__ , attention_mask=snake_case__ , training=snake_case__ )
UpperCAmelCase : str =model(snake_case__ , training=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : List[str] =self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] =config_and_inputs
UpperCAmelCase : Optional[int] ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __snake_case ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Optional[int] = (TFBlipTextModel,) if is_tf_available() else ()
__lowerCamelCase : Dict = False
__lowerCamelCase : Optional[Any] = False
__lowerCamelCase : Dict = False
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : str =BlipTextModelTester(self )
UpperCAmelCase : Optional[int] =ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
pass
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Optional[Any] =TFBlipTextModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def UpperCAmelCase__ ( self , snake_case__=True ) -> Any:
'''simple docstring'''
super().test_pt_tf_model_equivalence(allow_missing_keys=snake_case__ )
| 348 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__lowercase : Any = tempfile.mkdtemp()
# fmt: off
__lowercase : Union[str, Any] = ["""""", """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
__lowercase : Optional[int] = dict(zip(__a , range(len(__a ) ) ) )
__lowercase : Optional[Any] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
__lowercase : Optional[Any] = {"""unk_token""": """<unk>"""}
__lowercase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__a ) )
__lowercase : Any = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48145466, 0.4578275, 0.40821073],
"""image_std""": [0.26862954, 0.26130258, 0.27577711],
}
__lowercase : List[str] = os.path.join(self.tmpdirname , __a )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(__a , __a )
def lowerCAmelCase ( self : Optional[Any] , **__a : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="""!""" , **__a )
def lowerCAmelCase ( self : Union[str, Any] , **__a : str ) -> Optional[Any]:
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="""!""" , **__a )
def lowerCAmelCase ( self : int , **__a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Any = self.get_tokenizer()
__lowercase : Union[str, Any] = self.get_rust_tokenizer()
__lowercase : List[str] = self.get_image_processor()
__lowercase : str = OwlViTProcessor(tokenizer=__a , image_processor=__a )
processor_slow.save_pretrained(self.tmpdirname )
__lowercase : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__a )
__lowercase : Optional[Any] = OwlViTProcessor(tokenizer=__a , image_processor=__a )
processor_fast.save_pretrained(self.tmpdirname )
__lowercase : Optional[int] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __a )
self.assertIsInstance(processor_fast.tokenizer , __a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __a )
self.assertIsInstance(processor_fast.image_processor , __a )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase : Union[str, Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowercase : List[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__lowercase : Optional[Any] = self.get_image_processor(do_normalize=__a )
__lowercase : Optional[int] = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__a )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase : List[Any] = self.get_image_processor()
__lowercase : str = self.get_tokenizer()
__lowercase : Any = OwlViTProcessor(tokenizer=__a , image_processor=__a )
__lowercase : Any = self.prepare_image_inputs()
__lowercase : Union[str, Any] = image_processor(__a , return_tensors="""np""" )
__lowercase : str = processor(images=__a , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_image_processor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : Tuple = OwlViTProcessor(tokenizer=__a , image_processor=__a )
__lowercase : int = """lower newer"""
__lowercase : Optional[Any] = processor(text=__a , return_tensors="""np""" )
__lowercase : Optional[int] = tokenizer(__a , return_tensors="""np""" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase : List[str] = self.get_image_processor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : Any = OwlViTProcessor(tokenizer=__a , image_processor=__a )
__lowercase : Any = """lower newer"""
__lowercase : List[str] = self.prepare_image_inputs()
__lowercase : Dict = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(__a ):
processor()
def lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = """google/owlvit-base-patch32"""
__lowercase : Dict = OwlViTProcessor.from_pretrained(__a )
__lowercase : str = ["""cat""", """nasa badge"""]
__lowercase : int = processor(text=__a )
__lowercase : Any = 16
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__a ):
processor()
def lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
__lowercase : int = """google/owlvit-base-patch32"""
__lowercase : Union[str, Any] = OwlViTProcessor.from_pretrained(__a )
__lowercase : Dict = [["""cat""", """nasa badge"""], ["""person"""]]
__lowercase : List[str] = processor(text=__a )
__lowercase : int = 16
__lowercase : Tuple = len(__a )
__lowercase : List[Any] = max([len(__a ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__a ):
processor()
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : List[str] = """google/owlvit-base-patch32"""
__lowercase : List[str] = OwlViTProcessor.from_pretrained(__a )
__lowercase : Optional[int] = ["""cat""", """nasa badge"""]
__lowercase : List[str] = processor(text=__a )
__lowercase : Optional[int] = 16
__lowercase : str = inputs["""input_ids"""]
__lowercase : Optional[int] = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = self.get_image_processor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : List[Any] = OwlViTProcessor(tokenizer=__a , image_processor=__a )
__lowercase : str = self.prepare_image_inputs()
__lowercase : Dict = self.prepare_image_inputs()
__lowercase : Any = processor(images=__a , query_images=__a )
self.assertListEqual(list(inputs.keys() ) , ["""query_pixel_values""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(__a ):
processor()
def lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase : List[str] = self.get_image_processor()
__lowercase : str = self.get_tokenizer()
__lowercase : List[str] = OwlViTProcessor(tokenizer=__a , image_processor=__a )
__lowercase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowercase : Optional[int] = processor.batch_decode(__a )
__lowercase : Any = tokenizer.batch_decode(__a )
        self.assertListEqual(__a , __a )
| 351 |
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i )
    if n > 1:
        factors.append(n )
    return factors

if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 306 | 0
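# --- Hedged usage sketch for prime_factors above:
print(prime_factors(100))  # [2, 2, 5, 5]
print(prime_factors(97))   # [97] -- a prime is its own only factor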
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ ) -> Any:
__UpperCamelCase =parent
def _a ( self ) -> Optional[Any]:
return {}
def _UpperCAmelCase ( ):
__UpperCamelCase ='<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR="FFFFFF">\n <HR>\n <a href="http://google.com">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style="color:#0000FF">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>'
__UpperCamelCase ='\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n '
return [html_string_a, html_string_a]
@require_bsa
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : int = MarkupLMFeatureExtractor if is_bsa_available() else None
def _a ( self ) -> str:
__UpperCamelCase =MarkupLMFeatureExtractionTester(self )
@property
def _a ( self ) -> Union[str, Any]:
return self.feature_extract_tester.prepare_feat_extract_dict()
def _a ( self ) -> Tuple:
# Initialize feature_extractor
__UpperCamelCase =self.feature_extraction_class()
# Test not batched input
__UpperCamelCase =get_html_strings()[0]
__UpperCamelCase =feature_extractor(A_ )
# fmt: off
__UpperCamelCase =[['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
__UpperCamelCase =[['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
# fmt: on
self.assertEqual(encoding.nodes , A_ )
self.assertEqual(encoding.xpaths , A_ )
# Test batched
__UpperCamelCase =get_html_strings()
__UpperCamelCase =feature_extractor(A_ )
# fmt: off
__UpperCamelCase =expected_nodes + [['My First Heading', 'My first paragraph.']]
__UpperCamelCase =expected_xpaths + [['/html/body/h1', '/html/body/p']]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , A_ )
self.assertEqual(encoding.xpaths , A_ )
| 62 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 | 1 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
        'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'xlnet-base-cased': None,
    'xlnet-large-cased': None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = """left"""
def __init__( self , lowercase , lowercase=False , lowercase=True , lowercase=False , lowercase="<s>" , lowercase="</s>" , lowercase="<unk>" , lowercase="<sep>" , lowercase="<pad>" , lowercase="<cls>" , lowercase="<mask>" , lowercase=["<eop>", "<eod>"] , lowercase = None , **lowercase , ) -> None:
'''simple docstring'''
a__: List[Any] = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase) if isinstance(lowercase , lowercase) else mask_token
a__: Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowercase , remove_space=lowercase , keep_accents=lowercase , bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , additional_special_tokens=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , )
a__: Tuple = 3
a__: List[Any] = do_lower_case
a__: List[str] = remove_space
a__: Optional[Any] = keep_accents
a__: Optional[int] = vocab_file
a__: Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowercase)
@property
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
return len(self.sp_model)
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Dict = {self.convert_ids_to_tokens(lowercase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> int:
'''simple docstring'''
a__: Optional[int] = self.__dict__.copy()
a__: List[Any] = None
return state
def __setstate__( self , lowercase) -> Optional[int]:
'''simple docstring'''
a__: Optional[int] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
a__: Dict = {}
a__: int = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def lowerCamelCase_ ( self , lowercase) -> str:
'''simple docstring'''
if self.remove_space:
a__: Optional[int] = ' '.join(inputs.strip().split())
else:
a__: int = inputs
a__: List[Any] = outputs.replace('``' , '"').replace('\'\'' , '"')
if not self.keep_accents:
a__: Optional[int] = unicodedata.normalize('NFKD' , lowercase)
a__: Union[str, Any] = ''.join([c for c in outputs if not unicodedata.combining(lowercase)])
if self.do_lower_case:
a__: int = outputs.lower()
return outputs
def lowerCamelCase_ ( self , lowercase) -> List[str]:
'''simple docstring'''
a__: Any = self.preprocess_text(lowercase)
a__: Optional[int] = self.sp_model.encode(lowercase , out_type=lowercase)
a__: List[str] = []
for piece in pieces:
if len(lowercase) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
a__: Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowercase , ''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
a__: List[str] = cur_pieces[1:]
else:
a__: Union[str, Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(lowercase)
else:
new_pieces.append(lowercase)
return new_pieces
def lowerCamelCase_ ( self , lowercase) -> Optional[int]:
'''simple docstring'''
return self.sp_model.PieceToId(lowercase)
def lowerCamelCase_ ( self , lowercase) -> Optional[int]:
'''simple docstring'''
return self.sp_model.IdToPiece(lowercase)
def lowerCamelCase_ ( self , lowercase) -> Union[str, Any]:
'''simple docstring'''
a__: List[str] = ''.join(lowercase).replace(lowercase , ' ').strip()
return out_string
def lowerCamelCase_ ( self , lowercase , lowercase = False , lowercase = None , lowercase = True , **lowercase , ) -> str:
'''simple docstring'''
a__: Dict = kwargs.pop('use_source_tokenizer' , lowercase)
a__: Dict = self.convert_ids_to_tokens(lowercase , skip_special_tokens=lowercase)
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
a__: str = []
a__: List[str] = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowercase))
a__: Tuple = []
sub_texts.append(lowercase)
else:
current_sub_text.append(lowercase)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowercase))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
a__: Any = ''.join(lowercase)
a__: str = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
a__: Dict = self.clean_up_tokenization(lowercase)
return clean_text
else:
return text
def lowerCamelCase_ ( self , lowercase , lowercase = None) -> List[int]:
'''simple docstring'''
a__: Optional[Any] = [self.sep_token_id]
a__: Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
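    # Worked example for the method above (hypothetical ids): with sep_token_id=4 and
    # cls_token_id=3, a single sequence [7, 8] becomes [7, 8, 4, 3] and a pair
    # ([7, 8], [9]) becomes [7, 8, 4, 9, 4, 3]; unlike BERT, XLNet appends the
    # classifier token at the end of the sequence rather than the start.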
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True)
        if token_ids_b is not None:
            return ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_b)) + [1, 1]
        return ([0] * len(token_ids_a)) + [1, 1]
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_b is None:
            return len(token_ids_a + sep) * [0] + cls_segment_id
        return len(token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory , filename_prefix = None) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file , 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 203 | """simple docstring"""
from __future__ import annotations
class Node :
    def __init__( self , lowercase=None) -> None:
        '''simple docstring'''
        self.data = lowercase
        self.next = None
    def __repr__( self) -> str:
        '''simple docstring'''
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f'{temp.data}')
            temp = temp.next
        return "->".join(string_rep)
def make_linked_list( elements_list ) -> Node:
    if not elements_list:
        raise Exception('The Elements List is empty' )
    head = Node(elements_list[0] )
    current = head
    for i in range(1 , len(elements_list ) ):
        current.next = Node(elements_list[i] )
        current = current.next
    return head
def print_reverse( head_node ) -> None:
    if head_node is not None and isinstance(head_node , Node ):
        print_reverse(head_node.next )
        print(head_node.data )
def main( ) -> None:
    from doctest import testmod
    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43] )
    print('Linked List:' )
    print(linked_list )
    print('Elements in Reverse:' )
    print_reverse(linked_list )
if __name__ == "__main__":
main()
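# Expected output of the demo above (sketch; print_reverse recurses to the tail first,
# so it prints one element per line in reverse order):
#   Linked List:
#   14->52->14->12->43
#   Elements in Reverse:
#   43
#   12
#   14
#   52
#   14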
| 203 | 1 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess( snake_case__ ):
    '''simple docstring'''
    w , h = snake_case__.size
    w , h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
    image = snake_case__.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] )
    image = np.array(image ).astype(np.floataa ) / 2_55.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
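# Note on the scaling above: `2.0 * image - 1.0` maps pixels from [0, 1] to the
# [-1, 1] range the VQ-VAE expects (raw pixel 255 -> 1.0, 0 -> -1.0), and the
# `x - x % 32` rounding crops, e.g., a 511x767 input down to 480x736.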
class lowerCamelCase (DiffusionPipeline ):
    def __init__( self : List[Any] , vqvae : VQModel , unet : UNetaDModel , scheduler : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ) -> None:
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self : Union[str, Any] , image : Union[torch.Tensor, PIL.Image.Image] = None , batch_size : Optional[int] = 1 , num_inference_steps : Optional[int] = 1_0_0 , eta : Optional[float] = 0.0 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(F"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}""" )
        if isinstance(image , PIL.Image.Image ):
            image = preprocess(image )
        height , width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters() ).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        image = image.to(device=self.device , dtype=latents_dtype )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor ):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1 )
            latents_input = self.scheduler.scale_model_input(latents_input , t )
            # predict the noise residual
            noise_pred = self.unet(latents_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents ).sample
        image = torch.clamp(image , -1.0 , 1.0 )
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 165 |
"""simple docstring"""
def solution( snake_case__ = 50 ):
    '''simple docstring'''
    ways_number = [1] * (snake_case__ + 1)
    for row_length in range(3 , snake_case__ + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[snake_case__]
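# Sanity check: Project Euler problem 114 states that a row of length 7 admits exactly
# 17 arrangements, so solution(7) should return 17; the default solution(50) should
# reproduce the published answer (16475640049).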
if __name__ == "__main__":
print(F'{solution() = }')
| 165 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class _SCREAMING_SNAKE_CASE( PretrainedConfig ):
    model_type = '''gpt_neo'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
    def __init__( self ,vocab_size=5_02_57 ,max_position_embeddings=20_48 ,hidden_size=20_48 ,num_layers=24 ,attention_types=[[["global", "local"], 12]] ,num_heads=16 ,intermediate_size=None ,window_size=2_56 ,activation_function="gelu_new" ,resid_dropout=0.0 ,embed_dropout=0.0 ,attention_dropout=0.0 ,classifier_dropout=0.1 ,layer_norm_epsilon=1E-5 ,initializer_range=0.0_2 ,use_cache=True ,bos_token_id=5_02_56 ,eos_token_id=5_02_56 ,**kwargs ,) -> None:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types )
        if len(self.attention_layers ) != self.num_layers:
            raise ValueError(
                '''Configuration for convolutional module is incorrect. '''
                '''It is required that `len(config.attention_layers)` == `config.num_layers` '''
                f'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
                f'''`config.num_layers = {self.num_layers}`. '''
                '''`config.attention_layers` is prepared using `config.attention_types`. '''
                '''Please verify the value of `config.attention_types` argument.''' )
        super().__init__(bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
    @staticmethod
    def expand_attention_types_params( attention_types ) -> list:
        """simple docstring"""
        attentions = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions
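    # Example: the default attention_types=[[["global", "local"], 12]] expands to
    # ["global", "local", "global", "local", ...] with 24 entries, i.e. one attention
    # type per layer, which is why __init__ above compares the result against num_layers.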
def custom_unfold( input , dimension , size , step ) -> "torch.Tensor":
    # Custom torch.Tensor.unfold implementation so the operation can be exported to ONNX.
    import torch
    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0 , sizedim , step )
    min_length = torch.div(sizedim - size , step , rounding_mode='''floor''' ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
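# Shape sketch for the ONNX-friendly unfold above: an input of shape (1, 8) unfolded
# along dimension 1 with size=4 and step=2 yields (8 - 4) // 2 + 1 = 3 windows, so the
# result has shape (1, 3, 4), matching torch.Tensor.unfold(1, 4, 2).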
def custom_get_block_length_and_num_blocks( seq_length , window_size ) -> Any:
    import torch
    candidates = torch.arange(1 , window_size )
    remainders = torch.remainder(seq_length , candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length , largest_divisor , rounding_mode='''floor''' )
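# Example: seq_length=512 and window_size=256 gives candidates 1..255; the largest
# divisor of 512 in that range is 128, so the function returns (128, 4), i.e. blocks
# of length 128 split into 4 blocks.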
class _SCREAMING_SNAKE_CASE( OnnxConfigWithPast ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        common_inputs = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs ,direction='''inputs''' )
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''sequence'''}
        return common_inputs
    @property
    def num_attention_heads( self ) -> int:
        """simple docstring"""
        return self._config.num_heads
    def generate_dummy_inputs( self ,tokenizer ,batch_size = -1 ,seq_length = -1 ,is_pair = False ,framework = None ,) -> Mapping[str, Any]:
        """simple docstring"""
        common_inputs = super(OnnxConfigWithPast ,self ).generate_dummy_inputs(
            tokenizer ,batch_size=batch_size ,seq_length=seq_length ,is_pair=is_pair ,framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
                batch , seqlen = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['''past_key_values'''] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['''attention_mask'''] = common_inputs['''attention_mask''']
        if self.use_past:
            mask_dtype = ordered_inputs['''attention_mask'''].dtype
            ordered_inputs['''attention_mask'''] = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(batch ,past_key_values_length ,dtype=mask_dtype )] ,dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset( self ) -> int:
        """simple docstring"""
        return 13
| 359 |
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", "|"),
datarow=DataRow("", "|", "|"),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": f'🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results',
"emoji": True,
},
}
]
total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f'{line["duration"]:.4f}'
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
message = ""
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_0_0_0:
lowerCamelCase_ = "Too many failed tests, please see the full report in the Action results."
lowerCamelCase_ = len(err) + 1_0
lowerCamelCase_ = message[: 3_0_0_0 - offset] + f'\n...\n```\n{err}'
print(f'### {message}')
else:
lowerCamelCase_ = "No failed tests! 🤗"
print(f'## {message}')
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
lowerCamelCase_ = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
lowerCamelCase_ = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": message,
},
}
payload.append(md_report)
lowerCamelCase_ = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*For more details:*",
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "Check Action results",
"emoji": True,
},
"url": f'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
payload.append(action_button)
lowerCamelCase_ = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": f'Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}',
}
],
}
payload.append(date_report)
lowerCamelCase_ = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
lowerCamelCase_ = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
lowerCamelCase_ = ""
for i, row in enumerate(test_failures):
if row[0] != test_class:
lowerCamelCase_ = row[0]
else:
lowerCamelCase_ = ""
lowerCamelCase_ = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```',
},
}
client.chat_postMessage(
channel="#accelerate-ci-daily",
thread_ts=ts,
blocks=[payload],
) | 239 | 0 |
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( __lowercase : list[int] ) -> bool:
'''simple docstring'''
return len(set(__lowercase ) ) == len(__lowercase )
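# Doctest-style examples for the uniqueness check above:
#   UpperCAmelCase_([1, 2, 3]) -> True
#   UpperCAmelCase_([1, 2, 2]) -> False  (the set collapses the duplicate 2)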
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess( __lowercase : Any ) -> Any:
    '''simple docstring'''
    w , h = __lowercase.size
    w , h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
    image = __lowercase.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
    image = np.array(image ).astype(np.floataa ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
class A_ ( DiffusionPipeline ):
    def __init__( self : Optional[Any] , vqvae : VQModel , unet : UNetaDModel , scheduler : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ):
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self : Any , image : Union[torch.Tensor, PIL.Image.Image] = None , batch_size : Optional[int] = 1 , num_inference_steps : Optional[int] = 1_0_0 , eta : Optional[float] = 0.0 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ):
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}' )
        if isinstance(image , PIL.Image.Image ):
            image = preprocess(image )
        height , width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters() ).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        image = image.to(device=self.device , dtype=latents_dtype )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor ):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1 )
            latents_input = self.scheduler.scale_model_input(latents_input , t )
            # predict the noise residual
            noise_pred = self.unet(latents_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents ).sample
        image = torch.clamp(image , -1.0 , 1.0 )
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 22 | 1 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
@slow
    def test_small_integration_test( self : Tuple ):
        model = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small" )
        input_ids = tokenizer("Hello there" , return_tensors="np" ).input_ids
        labels = tokenizer("Hi I am" , return_tensors="np" ).input_ids
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id )
        logits = model(input_ids , decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9_127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 202 |
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
    "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
    "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class OwlViTTextConfig ( PretrainedConfig ):
    model_type = 'owlvit_text_model'
    def __init__( self : int , vocab_size : int=4_9_4_0_8 , hidden_size : Optional[Any]=5_1_2 , intermediate_size : Optional[Any]=2_0_4_8 , num_hidden_layers : str=1_2 , num_attention_heads : int=8 , max_position_embeddings : Tuple=1_6 , hidden_act : List[Any]="quick_gelu" , layer_norm_eps : Tuple=1e-5 , attention_dropout : Union[str, Any]=0.0 , initializer_range : List[Any]=0.02 , initializer_factor : str=1.0 , pad_token_id : str=0 , bos_token_id : List[str]=4_9_4_0_6 , eos_token_id : str=4_9_4_0_7 , **kwargs : Optional[Any] , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained( cls : Any , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs : Dict ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type" ) == "owlvit":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class OwlViTVisionConfig ( PretrainedConfig ):
    model_type = 'owlvit_vision_model'
    def __init__( self : Union[str, Any] , hidden_size : Optional[int]=7_6_8 , intermediate_size : int=3_0_7_2 , num_hidden_layers : List[str]=1_2 , num_attention_heads : List[str]=1_2 , num_channels : Optional[int]=3 , image_size : Optional[int]=7_6_8 , patch_size : str=3_2 , hidden_act : Tuple="quick_gelu" , layer_norm_eps : Dict=1e-5 , attention_dropout : Optional[int]=0.0 , initializer_range : List[Any]=0.02 , initializer_factor : str=1.0 , **kwargs : Tuple , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained( cls : Optional[int] , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs : int ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type" ) == "owlvit":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class OwlViTConfig ( PretrainedConfig ):
    model_type = 'owlvit'
    is_composition = True
    def __init__( self : Optional[Any] , text_config : Dict=None , vision_config : Tuple=None , projection_dim : Optional[Any]=5_1_2 , logit_scale_init_value : Optional[Any]=2.6_592 , return_dict : int=True , **kwargs : Tuple , ):
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." )
        self.text_config = OwlViTTextConfig(**text_config )
        self.vision_config = OwlViTVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0
    @classmethod
    def from_pretrained( cls : Dict , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs : Any ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
    @classmethod
    def from_text_vision_configs( cls : Optional[int] , text_config : Dict , vision_config : Dict , **kwargs : Optional[Any] ):
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict , **kwargs )
    def to_dict( self : Optional[int] ):
        output = copy.deepcopy(self.__dict__ )
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class OwlViTOnnxConfig ( OnnxConfig ):
    @property
    def inputs( self : List[str] ):
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ] )
    @property
    def outputs( self : Optional[int] ):
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ] )
    @property
    def atol_for_validation( self : str ):
        return 1e-4
    def generate_dummy_inputs( self : str , processor : "ProcessorMixin" , batch_size : int = -1 , seq_length : int = -1 , framework : Optional["TensorType"] = None , ):
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer , batch_size=batch_size , seq_length=seq_length , framework=framework )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor , batch_size=batch_size , framework=framework )
        return {**text_input_dict, **image_input_dict}
    @property
    def default_onnx_opset( self : List[Any] ):
        return 1_4
| 202 | 1 |
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters( with_config=True ):
    '''simple docstring'''
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True ) )
class _UpperCamelCase ( TestCase ):
    '''simple docstring'''
    dataset = None
    config_name = None
    def test_dataset_info_available( self , dataset , config_name ):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset , cache_dir=tmp_dir )
            builder_cls = import_main_class(dataset_module.module_path , dataset=True )
            builder_instance = builder_cls(
                cache_dir=tmp_dir , config_name=config_name , hash=dataset_module.hash , )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False ).replace(os.sep , "/" ),
                    config.DATASET_INFO_FILENAME,
                ] )
            dataset_info_path = cached_path(dataset_info_url , cache_dir=tmp_dir )
            self.assertTrue(os.path.exists(dataset_info_path ) )
@pytest.mark.integration
def test_as_dataset_from_hf_gcs( tmp_path_factory ):
    '''simple docstring'''
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp" ) / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia" , cache_dir=tmp_dir )
    builder_cls = import_main_class(dataset_module.module_path )
    builder_instance = builder_cls(
        cache_dir=tmp_dir , config_name="20220301.frr" , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset( tmp_path ):
    '''simple docstring'''
    dataset_module = dataset_module_factory("wikipedia" , cache_dir=tmp_path )
    builder_cls = import_main_class(dataset_module.module_path , dataset=True )
    builder_instance = builder_cls(
        cache_dir=tmp_path , config_name="20220301.frr" , hash=dataset_module.hash , )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds , IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds["train"] , IterableDataset )
    assert next(iter(ds["train"] ) )
| 57 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness( check_program : Union[str, Any] , timeout : Dict , task_id : Dict , completion_id : Dict ) -> Any:
    '''simple docstring'''
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute , args=(check_program, result, timeout) )
    p.start()
    p.join(timeout=timeout + 1 )
    if p.is_alive():
        p.kill()
    if not result:
        result.append('timed out' )
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute( check_program : str , result : List[str] , timeout : Dict ) -> None:
    '''simple docstring'''
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil
        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout ):
                    exec(check_program , exec_globals )
            result.append('passed' )
        except TimeoutException:
            result.append('timed out' )
        except BaseException as e:
            result.append(F"failed: {e}" )
        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit( seconds : Union[str, Any] ) -> Any:
    '''simple docstring'''
    def signal_handler( signum : List[Any] , frame : Dict ):
        raise TimeoutException('Timed out!' )
    signal.setitimer(signal.ITIMER_REAL , seconds )
    signal.signal(signal.SIGALRM , signal_handler )
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL , 0 )
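# Usage sketch for the guard above (Unix only: it relies on SIGALRM, so it must run in
# the main thread of the worker process):
#   with time_limit(2.0):
#       exec(untrusted_program, {})  # raises TimeoutException after ~2 seconds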
@contextlib.contextmanager
def swallow_io( ) -> Any:
    '''simple docstring'''
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream ):
        with contextlib.redirect_stderr(stream ):
            with redirect_stdin(stream ):
                yield
@contextlib.contextmanager
def create_tempdir( ) -> Any:
    '''simple docstring'''
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname ):
            yield dirname
class TimeoutException ( Exception ):
    pass
class WriteOnlyStringIO ( io.StringIO ):
    # A StringIO that raises when read from, so untrusted code cannot read stdin/stdout.
    def read( self : Tuple , *args : str , **kwargs : Any) -> Any:
        """simple docstring"""
        raise OSError
    def readline( self : List[str] , *args : Optional[Any] , **kwargs : Optional[Any]) -> Optional[int]:
        """simple docstring"""
        raise OSError
    def readlines( self : str , *args : List[str] , **kwargs : List[Any]) -> Union[str, Any]:
        """simple docstring"""
        raise OSError
    def readable( self : Union[str, Any] , *args : Optional[Any] , **kwargs : List[str]) -> bool:
        """simple docstring"""
        return False
class redirect_stdin ( contextlib._RedirectStream ): # type: ignore
    _stream = '''stdin'''
@contextlib.contextmanager
def chdir( root : List[Any] ) -> Any:
    '''simple docstring'''
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root )
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd )
def reliability_guard( maximum_memory_bytes : List[str]=None ) -> Any:
    '''simple docstring'''
    if maximum_memory_bytes is not None:
        import resource
        resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
        resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
    faulthandler.disable()
    import builtins
    builtins.exit = None
    builtins.quit = None
    import os
    os.environ['OMP_NUM_THREADS'] = '1'
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None
    import shutil
    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None
    import subprocess
    subprocess.Popen = None # type: ignore
    __builtins__['help'] = None
    import sys
    sys.modules['ipdb'] = None
    sys.modules['joblib'] = None
    sys.modules['resource'] = None
    sys.modules['psutil'] = None
    sys.modules['tkinter'] = None
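    # Note: the upstream OpenAI human-eval release this file adapts warns that this
    # guard only disables obviously destructive calls; it is NOT a security sandbox,
    # and untrusted model-generated code should still run inside an isolated container.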
| 339 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    """simple docstring"""
    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ) -> Any:
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        scheduler = PNDMScheduler(skip_prk_steps=True )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ) -> Union[str, Any]:
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uinta(image ) ).convert("""RGB""" )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """image_guidance_scale""": 1,
            """output_type""": """numpy""",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default_case( self ) -> str:
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_stable_diffusion_pix2pix_negative_prompt( self ) -> Union[str, Any]:
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        negative_prompt = """french fries"""
        output = sd_pipe(**inputs , negative_prompt=negative_prompt )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_stable_diffusion_pix2pix_multiple_init_images( self ) -> List[str]:
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs["""prompt"""] = [inputs["""prompt"""]] * 2
        image = np.array(inputs["""image"""] ).astype(np.floataa ) / 255.0
        image = torch.from_numpy(image ).unsqueeze(0 ).to(device )
        image = image / 2 + 0.5
        image = image.permute(0 , 3 , 1 , 2 )
        inputs["""image"""] = image.repeat(2 , 1 , 1 , 1 )
        image = sd_pipe(**inputs ).images
        image_slice = image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_stable_diffusion_pix2pix_euler( self ) -> Optional[int]:
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["""scheduler"""] = EulerAncestralDiscreteScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        slice = [round(x , 4 ) for x in image_slice.flatten().tolist()]
        print(""",""".join([str(x ) for x in slice] ) )
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_inference_batch_single_identical( self ) -> Dict:
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
    def test_latents_input( self ) -> Optional[int]:
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPixaPixPipeline(**components )
        pipe.image_processor = VaeImageProcessor(do_resize=False , do_normalize=False )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        out = pipe(**self.get_dummy_inputs_by_type(torch_device , input_image_type="""pt""" ) )[0]
        vae = components["""vae"""]
        inputs = self.get_dummy_inputs_by_type(torch_device , input_image_type="""pt""" )
        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param] ).latent_dist.mode()
        out_latents_inputs = pipe(**inputs )[0]
        max_diff = np.abs(out - out_latents_inputs ).max()
        self.assertLess(max_diff , 1e-4 , """passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ) -> None:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs( self , seed=0 ) -> Tuple:
        generator = torch.manual_seed(seed )
        image = load_image(
            """https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
        inputs = {
            """prompt""": """turn him into a cyborg""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """image_guidance_scale""": 1.0,
            """output_type""": """numpy""",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default( self ) -> int:
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=None )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-3
    def test_stable_diffusion_pix2pix_k_lms( self ) -> Tuple:
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=None )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-3
    def test_stable_diffusion_pix2pix_ddim( self ) -> Optional[Any]:
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=None )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
        assert np.abs(expected_slice - image_slice ).max() < 1e-3
    def test_stable_diffusion_pix2pix_intermediate_state( self ) -> Tuple:
        number_of_steps = 0
        def callback_fn( step , timestep , latents ) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
        callback_fn.has_been_called = False
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=None , torch_dtype=torch.floataa )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs , callback=callback_fn , callback_steps=1 )
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_pix2pix_pipeline_cpu_offload( self ) -> Optional[int]:
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=None , torch_dtype=torch.floataa )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9
    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8( self ) -> Any:
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["""image"""] = inputs["""image"""].resize((504, 504) )
        model_id = """timbrooks/instruct-pix2pix"""
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            model_id , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        output = pipe(**inputs )
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 164 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( DiffusionPipeline ):
    """simple docstring"""
    unet : UNetaDModel
    scheduler : KarrasVeScheduler
    def __init__( self , unet , scheduler ) -> None:
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size = 1 , num_inference_steps = 50 , generator = None , output_type = "pil" , return_dict = True , **kwargs , ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape , generator=generator , device=self.device ) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat , sigma_hat = self.scheduler.add_noise_to_input(sample , sigma , generator=generator )
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output , sigma_hat , sigma_prev , sample_hat )
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
                step_output = self.scheduler.step_correct(
                    model_output , sigma_hat , sigma_prev , sample_hat , step_output.prev_sample , step_output['''derivative'''] , )
            sample = step_output.prev_sample
        sample = (sample / 2 + 0.5).clamp(0 , 1 )
        image = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 164 | 1 |
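# A minimal, illustrative sampling sketch for the KarrasVePipeline above; the
# checkpoint path "./karras-ve-unet" is a placeholder, not a real model id.
unet = UNet2DModel.from_pretrained("./karras-ve-unet")
scheduler = KarrasVeScheduler()
pipeline = KarrasVePipeline(unet=unet, scheduler=scheduler)
image = pipeline(batch_size=1, num_inference_steps=50).images[0]
image.save("karras_ve_sample.png")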
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
| 103 |
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    """Fetch a single story from the Hacker News Firebase API."""
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()
def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top `max_stories` posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]
def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    """Render the top stories as a markdown bullet list of links."""
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 85 | 0 |
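# Illustrative output of hackernews_top_stories_as_markdown above; titles and
# URLs below are made up, since the real output depends on the live HN API:
#     print(hackernews_top_stories_as_markdown(2))
#     * [Example story one](https://example.com/one)
#     * [Example story two](https://example.com/two)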
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    """Fast GPT-NeoX-20B tokenizer (backed by HuggingFace's tokenizers library), based on byte-level BPE."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Concatenate conversation turns, each followed by the EOS token."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
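# Quick illustrative round-trip with the fast tokenizer defined above (downloads
# the EleutherAI/gpt-neox-20b tokenizer referenced in PRETRAINED_VOCAB_FILES_MAP):
from transformers import GPTNeoXTokenizerFast

tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
ids = tokenizer("Hello, GPT-NeoX!").input_ids
print(tokenizer.decode(ids))  # Hello, GPT-NeoX!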
| 356 | """simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Union[str, Any] = CTRLTokenizer
_UpperCAmelCase :Any = False
_UpperCAmelCase :List[Any] = False
def _snake_case ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__: Dict = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
lowercase__: Any = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
lowercase__: Optional[int] = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
lowercase__: Optional[Any] = {'''unk_token''': '''<unk>'''}
lowercase__: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__: int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_UpperCAmelCase ) )
def _snake_case ( self , **_UpperCAmelCase ):
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: Optional[int] = '''adapt react readapt apt'''
lowercase__: Optional[int] = '''adapt react readapt apt'''
return input_text, output_text
def _snake_case ( self ):
lowercase__: List[str] = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase__: Optional[int] = '''adapt react readapt apt'''
lowercase__: Any = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
lowercase__: Optional[Any] = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: int = tokens + [tokenizer.unk_token]
lowercase__: str = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
| 2 | 0 |
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """
    Evaluate a fully parenthesized arithmetic expression with Dijkstra's
    two-stack algorithm.
    >>> dijkstras_two_stack_algorithm("(5 + 3)")
    8
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num_a = operand_stack.peek()
            operand_stack.pop()
            num_b = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num_b, num_a)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 138 |
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Return the ``torch.nn`` activation module matching ``act_fn``."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
| 138 | 1 |
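# Illustrative check of the activation factory above: GELU maps zeros to zeros.
import torch

act = get_activation("gelu")
print(act(torch.zeros(2, 3)))  # tensor of zeros with shape (2, 3)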
'''simple docstring'''
def harmonic_series(n_term: str) -> list:
    """Generate the harmonic series 1, 1/2, 1/3, ... up to the n-th term."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
| 363 |
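# Expected behaviour of harmonic_series above (worked examples):
#     harmonic_series("5") -> ['1', '1/2', '1/3', '1/4', '1/5']
#     harmonic_series("")  -> []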
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def lowerCamelCase__ ( A : str="" ):
'''simple docstring'''
UpperCAmelCase = tempfile.mkdtemp()
return os.path.join(A , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class UpperCamelCase__( unittest.TestCase ):
def a__( self : int )-> int:
"""simple docstring"""
UpperCAmelCase = torch.rand(12 , dtype=torch.floataa ) - 0.5
UpperCAmelCase = AgentAudio(lowerCAmelCase )
UpperCAmelCase = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCAmelCase , agent_type.to_raw() , atol=1E-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(lowerCAmelCase ) )
# Ensure that the file contains the same value as the original tensor
UpperCAmelCase , UpperCAmelCase = sf.read(lowerCAmelCase )
self.assertTrue(torch.allclose(lowerCAmelCase , torch.tensor(lowerCAmelCase ) , atol=1E-4 ) )
def a__( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = torch.rand(12 , dtype=torch.floataa ) - 0.5
UpperCAmelCase = get_new_path(suffix='''.wav''' )
sf.write(lowerCAmelCase , lowerCAmelCase , 16000 )
UpperCAmelCase = AgentAudio(lowerCAmelCase )
self.assertTrue(torch.allclose(lowerCAmelCase , agent_type.to_raw() , atol=1E-4 ) )
self.assertEqual(agent_type.to_string() , lowerCAmelCase )
@require_vision
@require_torch
class UpperCamelCase__( unittest.TestCase ):
def a__( self : List[Any] )-> Any:
"""simple docstring"""
UpperCAmelCase = torch.randint(0 , 256 , (64, 64, 3) )
UpperCAmelCase = AgentImage(lowerCAmelCase )
UpperCAmelCase = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCAmelCase , agent_type._tensor , atol=1E-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCAmelCase ) )
def a__( self : List[Any] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
UpperCAmelCase = Image.open(lowerCAmelCase )
UpperCAmelCase = AgentImage(lowerCAmelCase )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCAmelCase ) )
def a__( self : Optional[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
UpperCAmelCase = Image.open(lowerCAmelCase )
UpperCAmelCase = AgentImage(lowerCAmelCase )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCAmelCase ) )
class UpperCamelCase__( unittest.TestCase ):
def a__( self : int )-> Any:
"""simple docstring"""
UpperCAmelCase = '''Hey!'''
UpperCAmelCase = AgentText(lowerCAmelCase )
self.assertEqual(lowerCAmelCase , agent_type.to_string() )
self.assertEqual(lowerCAmelCase , agent_type.to_raw() )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
| 91 | 0 |
import os
def solution(filename: str = "input.txt") -> int:
    """
    Find the minimal path sum from the left column to the right column of the
    matrix in ``filename``, moving up, down and right (Project Euler 82).
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(f"{solution() = }")
| 146 | '''simple docstring'''
def different_signs(num_a: int, num_b: int) -> bool:
    """
    Return True if the two integers have opposite signs, using the fact that the
    XOR of two ints is negative exactly when their sign bits differ.
    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    """
    return num_a ^ num_b < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 239 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class A_ ( lowerCAmelCase_ ):
def lowercase ( self : Dict ):
_UpperCAmelCase = tempfile.mkdtemp()
_UpperCAmelCase = 8
# DPR tok
_UpperCAmelCase = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_UpperCAmelCase = os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
_UpperCAmelCase = os.path.join(snake_case_ , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
_UpperCAmelCase = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
_UpperCAmelCase = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
_UpperCAmelCase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_UpperCAmelCase = {"unk_token": "<unk>"}
_UpperCAmelCase = os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
_UpperCAmelCase = os.path.join(snake_case_ , BART_VOCAB_FILES_NAMES["vocab_file"] )
_UpperCAmelCase = os.path.join(snake_case_ , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case_ ) )
def lowercase ( self : Dict ):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def lowercase ( self : Dict ):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def lowercase ( self : str ):
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def lowercase ( self : int ):
_UpperCAmelCase = os.path.join(self.tmpdirname , "rag_tokenizer" )
_UpperCAmelCase = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
_UpperCAmelCase = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(snake_case_ )
rag_tokenizer.save_pretrained(snake_case_ )
_UpperCAmelCase = RagTokenizer.from_pretrained(snake_case_ , config=snake_case_ )
self.assertIsInstance(new_rag_tokenizer.question_encoder , snake_case_ )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , snake_case_ )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def lowercase ( self : Any ):
_UpperCAmelCase = RagTokenizer.from_pretrained("facebook/rag-token-nq" )
_UpperCAmelCase = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
_UpperCAmelCase = tokenizer(snake_case_ )
self.assertIsNotNone(snake_case_ )
@slow
def lowercase ( self : Optional[Any] ):
_UpperCAmelCase = RagTokenizer.from_pretrained("facebook/rag-sequence-nq" )
_UpperCAmelCase = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
_UpperCAmelCase = tokenizer(snake_case_ )
self.assertIsNotNone(snake_case_ )
| 156 |
'''simple docstring'''
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Sort a list with the bidirectional bubble (cocktail shaker) sort.
    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
| 156 | 1 |
'''simple docstring'''
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
__snake_case = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def a ( __a , __a , __a , __a , __a ) -> Dict:
'''simple docstring'''
for attribute in key.split('''.''' ):
UpperCamelCase__ :int = getattr(__a , __a )
if weight_type is not None:
UpperCamelCase__ :Tuple = getattr(__a , __a ).shape
else:
UpperCamelCase__ :Union[str, Any] = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
UpperCamelCase__ :str = value
elif weight_type == "weight_g":
UpperCamelCase__ :int = value
elif weight_type == "weight_v":
UpperCamelCase__ :List[str] = value
elif weight_type == "bias":
UpperCamelCase__ :Union[str, Any] = value
else:
UpperCamelCase__ :Dict = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def a ( __a , __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Tuple = []
UpperCamelCase__ :str = fairseq_model.state_dict()
UpperCamelCase__ :List[Any] = hf_model.feature_extractor
UpperCamelCase__ :Dict = hf_model.adapter
for name, value in fairseq_dict.items():
UpperCamelCase__ :Tuple = False
if "conv_layers" in name:
load_conv_layer(
__a , __a , __a , __a , hf_model.config.feat_extract_norm == '''group''' , )
UpperCamelCase__ :Any = True
elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ):
load_adapter(__a , __a , __a , __a )
UpperCamelCase__ :List[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
UpperCamelCase__ :Tuple = True
if "*" in mapped_key:
UpperCamelCase__ :Any = name.split(__a )[0].split('''.''' )[-2]
UpperCamelCase__ :Optional[Any] = mapped_key.replace('''*''' , __a )
if "weight_g" in name:
UpperCamelCase__ :List[Any] = '''weight_g'''
elif "weight_v" in name:
UpperCamelCase__ :Optional[Any] = '''weight_v'''
elif "bias" in name:
UpperCamelCase__ :Tuple = '''bias'''
elif "weight" in name:
UpperCamelCase__ :Optional[Any] = '''weight'''
else:
UpperCamelCase__ :str = None
set_recursively(__a , __a , __a , __a , __a )
continue
if not is_used:
unused_weights.append(__a )
logger.warning(f'''Unused weights: {unused_weights}''' )
def a ( __a , __a , __a , __a , __a ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ :int = full_name.split('''conv_layers.''' )[-1]
UpperCamelCase__ :str = name.split('''.''' )
UpperCamelCase__ :int = int(items[0] )
UpperCamelCase__ :Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
UpperCamelCase__ :Any = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
UpperCamelCase__ :List[Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
UpperCamelCase__ :Any = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
UpperCamelCase__ :int = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__a )
def a ( __a , __a , __a , __a ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ :Tuple = full_name.split('''adaptor.''' )[-1]
UpperCamelCase__ :int = name.split('''.''' )
if items[1].isdigit():
UpperCamelCase__ :List[str] = int(items[1] )
else:
UpperCamelCase__ :List[Any] = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
UpperCamelCase__ :List[str] = value
logger.info(f'''Adapter proj layer norm bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
UpperCamelCase__ :int = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
UpperCamelCase__ :Optional[Any] = value
logger.info(f'''Adapter proj layer bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
UpperCamelCase__ :List[str] = value
logger.info(f'''Adapter proj layer weight was initialized from {full_name}.''' )
elif isinstance(__a , __a ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
UpperCamelCase__ :Dict = value
logger.info(f'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
UpperCamelCase__ :str = value
logger.info(f'''Adapter layer {layer_id} weight was initialized from {full_name}.''' )
else:
unused_weights.append(__a )
def a ( __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ :Any = emb.weight.shape
UpperCamelCase__ :int = nn.Linear(__a , __a , bias=__a )
UpperCamelCase__ :Dict = emb.weight.data
return lin_layer
@torch.no_grad()
def a ( __a , __a , __a , __a , __a , __a , __a , __a , __a , __a , __a , ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :str = WavaVecaConfig.from_pretrained(
__a , add_adapter=__a , adapter_stride=__a , adapter_kernel_size=__a , use_auth_token=__a , output_hidden_size=__a , )
UpperCamelCase__ :Dict = MBartConfig.from_pretrained(__a )
# load model
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'''config_yaml''': config_yaml_path,
'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
'''w2v_path''': checkpoint_path,
'''load_pretrained_decoder_from''': None,
} , )
UpperCamelCase__ :List[str] = model[0].eval()
# load feature extractor
UpperCamelCase__ :Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(__a , use_auth_token=__a )
# set weights for wav2vec2 encoder
UpperCamelCase__ :List[str] = WavaVecaModel(__a )
recursively_load_weights_wavaveca(model.encoder , __a )
# load decoder weights
UpperCamelCase__ :Tuple = MBartForCausalLM(__a )
UpperCamelCase__ , UpperCamelCase__ :int = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__a )
logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
UpperCamelCase__ :str = SpeechEncoderDecoderModel(encoder=__a , decoder=__a )
UpperCamelCase__ :Tuple = False
UpperCamelCase__ :List[Any] = MBartaaTokenizer(__a )
tokenizer.save_pretrained(__a )
UpperCamelCase__ :Dict = hf_wavavec.config.to_dict()
UpperCamelCase__ :Optional[int] = tokenizer.pad_token_id
UpperCamelCase__ :Tuple = tokenizer.bos_token_id
UpperCamelCase__ :Union[str, Any] = tokenizer.eos_token_id
UpperCamelCase__ :Any = '''mbart50'''
UpperCamelCase__ :Optional[Any] = '''wav2vec2'''
UpperCamelCase__ :List[Any] = tokenizer.eos_token_id
UpperCamelCase__ :Optional[int] = 250004
UpperCamelCase__ :Tuple = tokenizer.eos_token_id
UpperCamelCase__ :int = SpeechEncoderDecoderConfig.from_dict(__a )
hf_wavavec.save_pretrained(__a )
feature_extractor.save_pretrained(__a )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-xls-r-1b''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/mbart-large-50-one-to-many-mmt''',
type=str,
help='''Path to hf decoder checkpoint config''',
)
parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whethere to add model adapter layers''')
parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''')
parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''')
parser.add_argument('''--encoder_output_dim''', default=1024, type=int, help='''encoder output dim''')
parser.add_argument('''--start_token_id''', default=250004, type=int, help='''`decoder_start_token_id` of model config''')
__snake_case = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
) | 97 | """simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_a : str= logging.get_logger(__name__)
_a : str= {"vocab_file": "spiece.model"}
_a : Tuple= {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
}
}
_a : int= {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
# Segments (not really needed)
_a : Optional[int]= 0
_a : str= 1
_a : Tuple= 2
_a : str= 3
_a : Optional[Any]= 4
class UpperCamelCase ( lowercase ):
UpperCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES
UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase : str = """left"""
def __init__(self : List[Any] , _A : List[str] , _A : int=False , _A : Tuple=True , _A : Optional[Any]=False , _A : List[Any]="<s>" , _A : Dict="</s>" , _A : str="<unk>" , _A : Optional[Any]="<sep>" , _A : Optional[Any]="<pad>" , _A : Optional[Any]="<cls>" , _A : Dict="<mask>" , _A : List[Any]=["<eop>", "<eod>"] , _A : Optional[Dict[str, Any]] = None , **_A : List[str] , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
__snake_case : str = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else mask_token
__snake_case : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A , remove_space=_A , keep_accents=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , additional_special_tokens=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
__snake_case : Tuple = 3
__snake_case : Optional[int] = do_lower_case
__snake_case : Union[str, Any] = remove_space
__snake_case : Dict = keep_accents
__snake_case : str = vocab_file
__snake_case : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(_A)
@property
def _lowercase (self : Dict) -> List[str]:
return len(self.sp_model)
def _lowercase (self : Dict) -> Union[str, Any]:
__snake_case : str = {self.convert_ids_to_tokens(_A): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__(self : Union[str, Any]) -> List[str]:
__snake_case : Optional[Any] = self.__dict__.copy()
__snake_case : Union[str, Any] = None
return state
def __setstate__(self : Union[str, Any] , _A : Optional[Any]) -> str:
__snake_case : Optional[int] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
__snake_case : List[Any] = {}
__snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def _lowercase (self : Any , _A : Tuple) -> List[str]:
if self.remove_space:
__snake_case : List[Any] = ' '.join(inputs.strip().split())
else:
__snake_case : Tuple = inputs
__snake_case : int = outputs.replace('``' , '"').replace('\'\'' , '"')
if not self.keep_accents:
__snake_case : str = unicodedata.normalize('NFKD' , _A)
__snake_case : Tuple = ''.join([c for c in outputs if not unicodedata.combining(_A)])
if self.do_lower_case:
__snake_case : Union[str, Any] = outputs.lower()
return outputs
def _lowercase (self : List[Any] , _A : str) -> List[str]:
__snake_case : int = self.preprocess_text(_A)
__snake_case : Dict = self.sp_model.encode(_A , out_type=_A)
__snake_case : Union[str, Any] = []
for piece in pieces:
if len(_A) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
__snake_case : List[str] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_A , ''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
__snake_case : List[str] = cur_pieces[1:]
else:
__snake_case : Union[str, Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(_A)
else:
new_pieces.append(_A)
return new_pieces
def _lowercase (self : Union[str, Any] , _A : Union[str, Any]) -> Any:
return self.sp_model.PieceToId(_A)
def _lowercase (self : Tuple , _A : str) -> Optional[int]:
return self.sp_model.IdToPiece(_A)
def _lowercase (self : List[str] , _A : Dict) -> List[Any]:
__snake_case : str = ''.join(_A).replace(_A , ' ').strip()
return out_string
def _lowercase (self : Dict , _A : List[int] , _A : bool = False , _A : bool = None , _A : bool = True , **_A : str , ) -> str:
__snake_case : Tuple = kwargs.pop('use_source_tokenizer' , _A)
__snake_case : Tuple = self.convert_ids_to_tokens(_A , skip_special_tokens=_A)
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
__snake_case : List[str] = []
__snake_case : str = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A))
__snake_case : List[Any] = []
sub_texts.append(_A)
else:
current_sub_text.append(_A)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
__snake_case : Optional[int] = ''.join(_A)
__snake_case : str = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
__snake_case : str = self.clean_up_tokenization(_A)
return clean_text
else:
return text
def _lowercase (self : Dict , _A : List[int] , _A : Optional[List[int]] = None) -> List[int]:
__snake_case : int = [self.sep_token_id]
__snake_case : Any = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowercase (self : List[str] , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A)
if token_ids_a is not None:
return ([0] * len(_A)) + [1] + ([0] * len(_A)) + [1, 1]
return ([0] * len(_A)) + [1, 1]
def _lowercase (self : Dict , _A : List[int] , _A : Optional[List[int]] = None) -> List[int]:
__snake_case : Tuple = [self.sep_token_id]
__snake_case : Optional[int] = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def _lowercase (self : Tuple , _A : str , _A : Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(_A):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
__snake_case : str = os.path.join(
_A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(_A) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , _A)
elif not os.path.isfile(self.vocab_file):
with open(_A , 'wb') as fi:
__snake_case : Tuple = self.sp_model.serialized_model_proto()
fi.write(_A)
return (out_vocab_file,)
| 172 | 0 |
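# Illustrative use of the XLNet tokenizer defined above (downloads the
# sentencepiece model of the xlnet-base-cased checkpoint):
from transformers import XLNetTokenizer

tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
print(tokenizer.tokenize("Hello, world!"))  # SentencePiece pieces, e.g. ['▁Hello', ',', '▁world', '!']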
"""simple docstring"""
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) via Pascal's rule in O(n * r) time and O(r) space."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=1_0, r=5))
| 369 | """simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}
class VanConfig(PretrainedConfig):
    """Configuration for a VAN (Visual Attention Network) model."""
    model_type = "van"
    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 30 | 0 |
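# Illustrative instantiation of the configuration above with its defaults:
config = VanConfig()
print(config.model_type, config.depths)  # van [3, 3, 12, 3]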
def binary_and(a: int, b: int) -> str:
    """
    Bitwise AND of two non-negative ints, returned as a zero-padded binary string.
    >>> binary_and(25, 32)
    '0b000000'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 |
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution() -> int:
    """
    Count how many words in words.txt have an alphabetical value that is a
    triangular number (Project Euler problem 42).
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")
    words = ""
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
| 29 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, config_file: str, pytorch_dump_path: str) -> None:
    """Convert a TensorFlow LXMERT checkpoint to a PyTorch model file."""
    # Initialise PyTorch model from the json config
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
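# Equivalent direct call bypassing argparse (all paths below are placeholders):
# convert_tf_checkpoint_to_pytorch(
#     tf_checkpoint_path="./lxmert/model.ckpt",
#     config_file="./lxmert/config.json",
#     pytorch_dump_path="./pytorch_model.bin",
# )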
| 356 | '''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """
    Fourth-order Improved Pseudo Linear Multistep (iPNDM) scheduler, see
    https://arxiv.org/pdf/2202.09778.pdf (formulas (9), (12), (13) and Algorithm 2).
    """
    order = 1
    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)
        self.ets = []
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample
    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample
    def __len__(self):
        return self.config.num_train_timesteps
| 274 | 0 |
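# Illustrative denoising loop driven by the scheduler above; the zero tensor is a
# stand-in for a real UNet's noise prediction.
import torch

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=10)
sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_output = torch.zeros_like(sample)  # placeholder model prediction
    sample = scheduler.step(model_output, t, sample).prev_sample
print(sample.shape)  # torch.Size([1, 3, 32, 32])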
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    """Check Prim's MST against the expected edge set."""
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjancency = defaultdict(list)
    for node_a, node_b, cost in edges:
        adjancency[node_a].append([node_b, cost])
        adjancency[node_b].append([node_a, cost])
    result = mst(adjancency)
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
| 29 |
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
lowerCamelCase : Any = re.compile(R'([A-Z]+)([A-Z][a-z])')
lowerCamelCase : str = re.compile(R'([a-z\d])([A-Z])')
lowerCamelCase : Optional[int] = re.compile(R'(?<!_)_(?!_)')
lowerCamelCase : List[Any] = re.compile(R'(_{2,})')
lowerCamelCase : str = R'^\w+(\.\w+)*$'
lowerCamelCase : Dict = R'<>:/\|?*'
def _SCREAMING_SNAKE_CASE (A ) -> Any:
"""simple docstring"""
lowercase__ = _uppercase_uppercase_re.sub(R'''\1_\2''' , A )
lowercase__ = _lowercase_uppercase_re.sub(R'''\1_\2''' , A )
return name.lower()
def _SCREAMING_SNAKE_CASE (A ) -> Tuple:
"""simple docstring"""
lowercase__ = _single_underscore_re.split(A )
lowercase__ = [_multiple_underscores_re.split(A ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(A ) if n != '''''' )
def _SCREAMING_SNAKE_CASE (A ) -> Tuple:
"""simple docstring"""
if os.path.basename(A ) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}" )
return camelcase_to_snakecase(A )
def _SCREAMING_SNAKE_CASE (A , A ) -> Optional[Any]:
"""simple docstring"""
if os.path.basename(A ) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}" )
if not re.match(_split_re , A ):
raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'." )
return f"{filename_prefix_for_name(A )}-{split}"
def _SCREAMING_SNAKE_CASE (A , A , A , A=None ) -> List[str]:
"""simple docstring"""
lowercase__ = filename_prefix_for_split(A , A )
if filetype_suffix:
prefix += f".{filetype_suffix}"
lowercase__ = os.path.join(A , A )
return f"{filepath}*"
def _SCREAMING_SNAKE_CASE (A , A , A , A=None , A=None ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = filename_prefix_for_split(A , A )
lowercase__ = os.path.join(A , A )
if shard_lengths:
lowercase__ = len(A )
lowercase__ = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(A )]
if filetype_suffix:
lowercase__ = [filename + f".{filetype_suffix}" for filename in filenames]
return filenames
else:
lowercase__ = prefix
if filetype_suffix:
filename += f".{filetype_suffix}"
return [filename]
| 2 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 350 |
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")
    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f'Output:\n{text + "|"}')
def encrypt_message(key: int, message: str) -> str:
    """Read the message down the columns of a grid that is `key` columns wide."""
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)
def decrypt_message(key: int, message: str) -> str:
    """Write the cipher text back into the grid row by row to recover the plain text."""
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 276 | 0 |
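# Illustrative round trip for the transposition cipher above:
secret = "Common sense is not so common."
encrypted = encrypt_message(8, secret)
print(encrypted)                                 # columns of the grid, concatenated
print(decrypt_message(8, encrypted) == secret)   # True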